gt
stringclasses 1
value | context
stringlengths 2.49k
119k
|
|---|---|
# ocean
import json
import random
import os
import time
from datetime import datetime
from itertools import chain
from django.shortcuts import render
from django.core import serializers
from django.http import HttpResponse
from django.http import JsonResponse
import django_filters.rest_framework
from django.core.cache import cache
from django.test.utils import override_settings
from django.contrib.gis.geos import Point
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.geos import Polygon, MultiPolygon
from rest_framework.parsers import JSONParser
from django.views.decorators.csrf import csrf_exempt
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import generics
from celery.result import AsyncResult
from .models import *
from .serializers import *
from app.settings import DEBUG
def test(request, attribute):
    """Debug view: return up to 1000 SeaSurfaceTemperature points for a
    fixed day inside the default region, as JSON.

    ``attribute`` is echoed back in the response; the queried model is
    hard-coded below.
    """
    day = '2016-09-01'
    data = {'content': 'Test', 'attribute': attribute}
    # Model under test is hard-coded; swap here to probe other tables
    # (e.g. 'Chlorophyll', 'SeaIceCover').
    model = 'SeaSurfaceTemperature'
    # Bug fix: the original looked up ``Region.objects.get(slug=region)``
    # with ``region`` undefined, so a bare except always fell through to
    # the default region. Do that explicitly.
    region = Region.objects.get(slug='default')
    start_time = time.time()
    startDate = day + ' 00:00:00'
    endDate = day + ' 23:59:59'
    print('Making query...')
    # NOTE(review): eval() on a hard-coded name is safe here, but use a
    # dict of model classes if this ever takes user input.
    qs = eval(model).objects.filter(
        datetime__range=(startDate, endDate),
        point__intersects=str(region.mpoly),
    )[0:1000]
    serializer = eval(model + 'Serializer')(qs, many=True)
    data['qs'] = serializer.data
    print("--- %s seconds ---" % (time.time() - start_time))
    return JsonResponse(data, safe=False)
# Creating chache for query set object
# for default region by given date and model
# Creating cache for query set object
# for default region by given date and model
def create_qs_cache_byday_and_model(date, model, renew=False):
    """Return (and lazily build) the cached default-region queryset for
    one day and model.

    Parameters
    ----------
    date : str
        Day in 'YYYY-MM-DD' form.
    model : str
        Name of the Django model class to query.
    renew : bool
        Force a rebuild of the cache entry.
    """
    date = str(date)
    model = str(model)
    dateTime = date + ' 00:00:00'
    # Generating query set cache key
    cacheKey = 'qs' + date + model
    # Bug fix: single cache lookup instead of up to three round trips
    cached = cache.get(cacheKey)
    if not cached or renew:
        print("Making day cache...")
        region = Region.objects.get(slug='default')
        qs = eval(model).objects.filter(
            datetime=dateTime,
            point__intersects=str(region.mpoly),
        )
        cache.set(cacheKey, qs)
        return qs
    print("Got the cached data from memcached")
    return cached
def get_filter(request):
    """Return all regions and characteristics in a single JSON payload."""
    regions = RegionSerializer(Region.objects.all(), many=True).data
    characteristics = CharacteristicSerializer(
        Characteristic.objects.all(), many=True).data
    payload = {'regions': regions, 'characteristics': characteristics}
    return JsonResponse(payload, safe=False)
def api_get_queryset(
        model, _date, region, needed, createDump=True):
    """Select a normalized subsample of points for one model/date/region.

    Spreads roughly ``needed`` points evenly across the full result set by
    taking ``select`` random-offset slices, and caches the final list.

    Parameters
    ----------
    model : str
        Name of the Django model class to query (e.g. 'Chlorophyll').
    _date : str
        Day in 'YYYY-MM-DD' form.
    region : str
        Region slug; falls back to 'default' when unknown.
    needed : int or str
        Number of points requested (a minimum of 500 is enforced).
    createDump : bool
        Currently unused while the dump task is disabled below.
    """
    # Returned result array
    result = []
    # Convert date to datetime format
    date = _date + ' 00:00:00'
    # Max number of DB selects that have to be done
    select = 20
    # Number of points you need
    needed = max(int(needed), 500)
    # Getting the region value; bug fix: narrow the bare ``except:`` to the
    # lookup failure we actually expect
    try:
        region = Region.objects.get(slug=region)
    except Region.DoesNotExist:
        region = Region.objects.get(slug='default')
    """
    # Creating query dump task if needed
    if createDump:
        task = create_dump_task.delay(
            model, date, str(region), str(region.mpoly))
    """
    # Generating query set cache key
    cacheKey = str(_date) + '-' + str(region.slug) \
        + '-' + str(model) \
        + '-' + str(needed) + '-' \
        + str(select)
    # Define main DB query; execution is delayed until sliced/counted
    qsTotal = eval(model).objects.filter(
        datetime=date,
        point__intersects=region.mpoly,
    )
    # Total points found using query filter
    total = qsTotal.count()
    print("Total points found:", total)
    # Exit if no points found
    if total < 1:
        print("No points found")
        return result
    # Normalization: split the result set into ``select`` strata and pull a
    # random slice of ``pull`` rows from each
    step = int(round(total / select, -1))
    pull = int(round(step / (total / needed), -1))
    randStep = step - pull
    randStep = randStep if randStep > 0 else step
    # Trying to get cached data
    result = cache.get(cacheKey)
    # If cached data is not set, make the query
    if not result:
        result = []
        print("Making query...")
        for i in range(select):
            shift = i * step + random.randint(0, randStep)
            print("Query shift:", shift)
            qs = qsTotal.filter()[shift:shift + pull]
            result = list(chain(result, qs))
        cache.set(cacheKey, result)
    else:
        print("Got the cached data from memcached")
    print("Points selected:", len(result))
    return result
class ApiChlor(generics.ListCreateAPIView):
    """REST endpoint serving normalized Chlorophyll points."""
    serializer_class = ChlorophyllSerializer

    def get_queryset(self):
        params = self.kwargs
        return api_get_queryset(
            'Chlorophyll',
            params['date'],
            params['region'],
            params['needed'],
        )
class ApiSST(generics.ListCreateAPIView):
    """REST endpoint serving normalized sea-surface-temperature points."""
    serializer_class = SSTSerializer

    def get_queryset(self):
        params = self.kwargs
        return api_get_queryset(
            'SST',
            params['date'],
            params['region'],
            params['needed'],
        )
class ApiBath(generics.ListCreateAPIView):
    """REST endpoint serving bathymetric points (fixed snapshot date)."""
    serializer_class = BathymetricSerializer

    def get_queryset(self):
        params = self.kwargs
        # Bathymetry is static; a single snapshot date is always used.
        return api_get_queryset(
            'Bathymetric',
            '2015-03-18',
            params['region'],
            params['needed'],
        )
class CharacteristicsList(generics.ListCreateAPIView):
    """List/create endpoint for all characteristics."""
    queryset = Characteristic.objects.all()
    serializer_class = CharacteristicSerializer
class CharacteristicsDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete endpoint for a single characteristic."""
    queryset = Characteristic.objects.all()
    serializer_class = CharacteristicSerializer
class RegionsList(generics.ListCreateAPIView):
    """List/create endpoint for all regions."""
    queryset = Region.objects.all()
    serializer_class = RegionSerializer
class RegionsDetail(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve/update/delete endpoint for a single region."""
    queryset = Region.objects.all()
    serializer_class = RegionSerializer
def regions_detail(request, argument):
    """Return a single region as JSON, looked up by primary key or slug.

    A numeric ``argument`` is treated as a pk, anything else as a slug.
    Returns an empty object when nothing matches (previously raised
    IndexError).
    """
    try:
        # Bug fix: narrow the bare ``except:`` to the int() failure
        qs = Region.objects.filter(pk=int(argument))
    except ValueError:
        qs = Region.objects.filter(slug=str(argument))
    serializer = RegionSerializer(qs, many=True)
    # Bug fix: guard against an unknown pk/slug instead of crashing on [0]
    data = serializer.data[0] if serializer.data else {}
    return JsonResponse(data, safe=False)
|
|
import numpy as np
import dask
import dask.array as dsa
from dask.base import tokenize, normalize_token
import xarray as xr
import warnings
from .duck_array_ops import concatenate
from .shrunk_index import all_index_data
def _get_grid_metadata():
    """Assemble metadata for grid variables, keyed by filename.

    Kept separate from _get_var_metadata because grid handling is special.
    """
    from ..mds_store import _get_all_grid_variables
    from ..variables import extra_grid_variables, vertical_coordinates

    # get grid info
    grid_vars = _get_all_grid_variables('llc')
    grid_vars.update(extra_grid_variables)

    # make dictionary with keys as filenames
    grid_metadata = {}
    for key, val in grid_vars.items():
        # masks use hFac filename to be computed in mds_store
        if 'filename' in val and key[:4] != 'mask':
            val.update({'real_name': key})
            grid_metadata[val['filename']] = val
        else:
            grid_metadata[key] = val
    # Force RF to point to Zp1; bug fix: copy the entry first so we do not
    # mutate the shared ``vertical_coordinates`` dict imported from
    # ..variables (the original wrote 'real_name' into the module table).
    grid_metadata['RF'] = dict(vertical_coordinates['Zp1'])
    grid_metadata['RF']['real_name'] = 'Zp1'
    for zv in ['Zu', 'Zl']:
        grid_metadata[zv] = vertical_coordinates[zv]
    return grid_metadata
def _get_var_metadata():
    """Build the variable-metadata table for LLC runs.

    The raw LLC output ships with no metadata at all, so everything is
    pulled from the xmitgcm package tables plus the diagnostics file.
    """
    from ..variables import state_variables, package_state_variables
    from ..utils import parse_available_diagnostics
    from ..default_diagnostics import diagnostics
    from io import StringIO

    metadata = state_variables.copy()
    metadata.update(package_state_variables)
    metadata.update(parse_available_diagnostics(StringIO(diagnostics)))

    # even the file names from the LLC data differ from standard MITgcm
    # output, so alias them to the canonical entries
    for alias, canonical in {'Eta': 'ETAN', 'PhiBot': 'PHIBOT',
                             'Salt': 'SALT', 'Theta': 'THETA'}.items():
        metadata[alias] = metadata[canonical]

    # add grid metadata
    metadata.update(_get_grid_metadata())
    return metadata
# Module-level metadata table, built once at import time.
_VAR_METADATA = _get_var_metadata()
def _is_vgrid(vname):
    """Return True for 1-D vertical grid variables (single 'k*' dim)."""
    dims = _VAR_METADATA[vname]['dims']
    if len(dims) != 1:
        return False
    return dims[0][0] == 'k'
def _get_variable_point(vname, mask_override):
# fix for https://github.com/MITgcm/xmitgcm/issues/191
if vname in mask_override:
return mask_override[vname]
dims = _VAR_METADATA[vname]['dims']
if 'i' in dims and 'j' in dims:
point = 'c'
elif 'i_g' in dims and 'j' in dims:
point = 'w'
elif 'i' in dims and 'j_g' in dims:
point = 's'
elif 'i_g' in dims and 'j_g' in dims:
raise ValueError("Don't have masks for corner points!")
else:
raise ValueError("Variable `%s` is not a horizontal variable." % vname)
return point
def _get_scalars_and_vectors(varnames, type):
    """Split ``varnames`` into scalar names and (u, v) vector pairs.

    For non-latlon datasets no rotation is needed, so every variable is
    treated as a scalar. Note: ``varnames`` is mutated in place (vector
    mates are removed), matching the original behavior.
    """
    for vname in varnames:
        if vname not in _VAR_METADATA:
            raise ValueError("Varname `%s` not found in metadata." % vname)

    if type != 'latlon':
        return varnames, []

    scalars = []
    vector_pairs = []
    for vname in varnames:
        meta = _VAR_METADATA[vname]
        try:
            mate = meta['attrs']['mate']
            if mate not in varnames:
                # Bug fix: format args must be a tuple; the original
                # ``% vname, mate`` raised TypeError instead of this error
                raise ValueError("Vector pairs are required to create "
                                 "latlon type datasets. Varname `%s` is "
                                 "missing its vector mate `%s`"
                                 % (vname, mate))
            vector_pairs.append((vname, mate))
            varnames.remove(mate)
        except KeyError:
            scalars.append(vname)
    # Bug fix: the original fell off the end and returned None here
    return scalars, vector_pairs
def _decompress(data, mask, dtype):
data_blank = np.full_like(mask, np.nan, dtype=dtype)
data_blank[mask] = data
data_blank.shape = mask.shape
return data_blank
# Start/stop face indices of each of the 5 LLC facets within the 13 faces.
_facet_strides = ((0,3), (3,6), (6,7), (7,10), (10,13))
# whether to reshape each face (facets 3-4 are rotated relative to 0-2)
_facet_reshape = (False, False, False, True, True)
# Total number of faces and facets in the LLC decomposition.
_nfaces = 13
_nfacets = 5
def _uncompressed_facet_index(nfacet, nside):
    """Element-wise (start, end) of facet ``nfacet`` within one flat level."""
    face_size = nside ** 2
    first, last = _facet_strides[nfacet]
    return first * face_size, last * face_size
def _facet_shape(nfacet, nside):
    """Shape of one facet; the long axis is y for facets 0-2, x for 3-4."""
    lo, hi = _facet_strides[nfacet]
    facet_length = hi - lo
    if _facet_reshape[nfacet]:
        return (1, nside, facet_length * nside)
    return (1, facet_length * nside, nside)
def _facet_to_faces(data, nfacet):
    """Split one facet array into its component faces along a new axis."""
    shape = data.shape
    # facet dimension
    nf, ny, nx = shape[-3:]
    other_dims = shape[:-3]
    assert nf == 1
    facet_length = _facet_strides[nfacet][1] - _facet_strides[nfacet][0]
    if _facet_reshape[nfacet]:
        # Bug fix: use floor division; a float in a shape tuple raises
        # TypeError under Python 3's true division
        new_shape = other_dims + (ny, facet_length, nx // facet_length)
        data_rs = data.reshape(new_shape)
        data_rs = np.moveaxis(data_rs, -2, -3)  # dask-safe
    else:
        new_shape = other_dims + (facet_length, ny // facet_length, nx)
        data_rs = data.reshape(new_shape)
    return data_rs
def _facets_to_faces(facets):
    """Concatenate per-facet arrays into a single 13-face array."""
    pieces = [_facet_to_faces(facet, n) for n, facet in enumerate(facets)]
    return concatenate(pieces, axis=-3)
def _faces_to_facets(data, facedim=-3):
    """Split a 13-face array into the 5 LLC facets.

    Inverse of _facets_to_faces; returns a list of facet arrays, each
    keeping a singleton face axis.
    """
    assert data.shape[facedim] == _nfaces
    facets = []
    for nfacet, (strides, reshape) in enumerate(zip(_facet_strides, _facet_reshape)):
        # slice with slice(nface, nface+1) so each face keeps a singleton
        # axis for the concatenation below
        face_data = [data[(...,) + (slice(nface, nface+1), slice(None), slice(None))]
                     for nface in range(*strides)]
        # facets 3-4 join along x, facets 0-2 along y
        if reshape:
            concat_axis = facedim + 2
        else:
            concat_axis = facedim + 1
        # todo: use duck typing for concat
        facet_data = concatenate(face_data, axis=concat_axis)
        facets.append(facet_data)
    return facets
def _rotate_scalar_facet(facet):
facet_transposed = np.moveaxis(facet, -1, -2)
facet_rotated = np.flip(facet_transposed, -2)
return facet_rotated
def _facets_to_latlon_scalar(all_facets):
    """Join facets 0-1 (as-is) and 3-4 (rotated) into one latlon strip."""
    pieces = list(all_facets[:2])
    pieces += [_rotate_scalar_facet(facet) for facet in all_facets[-2:]]
    # drop the singleton facet dimension before joining along x
    pieces = [p[..., 0, :, :] for p in pieces]
    return concatenate(pieces, axis=-1)
def _faces_to_latlon_scalar(data):
    """Convert a 13-face scalar array to the latlon layout."""
    return _facets_to_latlon_scalar(_faces_to_facets(data))
# dask's pad function doesn't work
# it does weird things to non-pad dimensions
# need to roll our own
def shift_and_pad(a):
    """Shift ``a`` left by one along the last axis, zero-padding the end."""
    a_shifted = a[..., 1:]
    # single zero column with the same chunking/dtype as the input
    pad_array = dsa.zeros_like(a[..., -2:-1])
    return concatenate([a_shifted, pad_array], axis=-1)
def transform_v_to_u(facet):
    """Rotate a v-component facet so it becomes a u component."""
    return _rotate_scalar_facet(facet)
def transform_u_to_v(facet, metric=False):
    """Rotate a u-component facet into a v component.

    The u data is shifted by one pixel before rotation, and the sign is
    flipped unless the variable is a positive-definite metric.
    """
    # "shift" u component by 1 pixel
    # (removed the unused ``pad_width`` leftover from the old dsa.pad path)
    facet_padded = shift_and_pad(facet)
    assert facet.shape == facet_padded.shape
    facet_rotated = _rotate_scalar_facet(facet_padded)
    if not metric:
        facet_rotated = -facet_rotated
    return facet_rotated
def _facets_to_latlon_vector(facets_u, facets_v, metric=False):
    """Join u/v facet lists into latlon arrays, rotating facets 3-4.

    For the rotated facets the roles swap: rotated v becomes u, and the
    shifted/rotated (and negated, unless ``metric``) u becomes v.
    """
    # need to pad the rotated v values
    ndim = facets_u[0].ndim
    # second-to-last axis is the one to pad, plus a facet axis
    assert ndim >= 3
    # drop facet dimension
    facets_u_drop = [f[..., 0, :, :] for f in facets_u]
    facets_v_drop = [f[..., 0, :, :] for f in facets_v]
    u_rot = (facets_u_drop[:2]
             + [transform_v_to_u(facet) for facet in facets_v_drop[-2:]])
    v_rot = (facets_v_drop[:2]
             + [transform_u_to_v(facet, metric) for facet in facets_u_drop[-2:]])
    u = concatenate(u_rot, axis=-1)
    v = concatenate(v_rot, axis=-1)
    return u, v
def _faces_to_latlon_vector(u_faces, v_faces, metric=False):
    """Convert matching u/v 13-face arrays to the latlon layout."""
    facets_u = _faces_to_facets(u_faces)
    facets_v = _faces_to_facets(v_faces)
    return _facets_to_latlon_vector(facets_u, facets_v, metric=metric)
def _drop_facedim(dims):
dims = list(dims)
dims.remove('face')
return dims
def _add_face_to_dims(dims):
new_dims = dims.copy()
if 'j' in dims:
j_dim = dims.index('j')
new_dims.insert(j_dim, 'face')
elif 'j_g' in dims:
j_dim = dims.index('j_g')
new_dims.insert(j_dim, 'face')
return new_dims
def _faces_coords_to_latlon(ds):
    """Rebuild dimension coordinates for the latlon layout.

    The i dims grow by a factor of 4 and the j dims by a factor of 3,
    matching how the facets are joined horizontally.
    """
    coords = ds.reset_coords().coords.to_dataset()
    ifac = 4  # x direction: four facets joined side by side
    jfac = 3  # y direction: three faces per facet
    dim_coords = {}
    for vname in coords.coords:
        if vname[0] == 'i':
            data = np.arange(ifac * coords.dims[vname])
        elif vname[0] == 'j':
            data = np.arange(jfac * coords.dims[vname])
        else:
            # non-horizontal coords are carried over unchanged
            data = coords[vname].data
        var = xr.Variable(ds[vname].dims, data, ds[vname].attrs)
        dim_coords[vname] = var
    return xr.Dataset(dim_coords)
def faces_dataset_to_latlon(ds, metric_vector_pairs=[('dxC', 'dyC'), ('dyG', 'dxG')]):
    """Transform a 13-face LLC xarray Dataset into a rectancular grid,
    discarding the Arctic.

    Parameters
    ----------
    ds : xarray.Dataset
        A 13-face LLC dataset
    metric_vector_pairs : list, optional
        Pairs of variables that are positive-definite metrics located at grid
        edges.

    Returns
    -------
    out : xarray.Dataset
        Transformed rectangular dataset
    """
    coord_vars = list(ds.coords)
    ds_new = _faces_coords_to_latlon(ds)

    # pair up vector components via their 'mate' attribute
    vector_pairs = []
    scalars = []
    vnames = list(ds.reset_coords().variables)
    for vname in vnames:
        try:
            mate = ds[vname].attrs['mate']
            vector_pairs.append((vname, mate))
            # NOTE(review): mutates ``vnames`` while iterating it; appears
            # to rely on each mate occurring after its partner — confirm
            # before reordering variables.
            vnames.remove(mate)
        except KeyError:
            pass

    all_vector_components = [inner for outer in (vector_pairs + metric_vector_pairs)
                             for inner in outer]
    scalars = [vname for vname in vnames if vname not in all_vector_components]
    data_vars = {}

    # scalars: plain facet rotation/concatenation
    for vname in scalars:
        if vname == 'face' or vname in ds_new:
            continue
        if 'face' in ds[vname].dims:
            data = _faces_to_latlon_scalar(ds[vname].data)
            dims = _drop_facedim(ds[vname].dims)
        else:
            data = ds[vname].data
            dims = ds[vname].dims
        data_vars[vname] = xr.Variable(dims, data, ds[vname].attrs)

    # true vectors: rotated with sign flip on the u component
    for vname_u, vname_v in vector_pairs:
        data_u, data_v = _faces_to_latlon_vector(ds[vname_u].data, ds[vname_v].data)
        data_vars[vname_u] = xr.Variable(_drop_facedim(ds[vname_u].dims), data_u, ds[vname_u].attrs)
        data_vars[vname_v] = xr.Variable(_drop_facedim(ds[vname_v].dims), data_v, ds[vname_v].attrs)

    # metric vectors: rotated without sign flip (positive-definite)
    for vname_u, vname_v in metric_vector_pairs:
        data_u, data_v = _faces_to_latlon_vector(ds[vname_u].data, ds[vname_v].data, metric=True)
        data_vars[vname_u] = xr.Variable(_drop_facedim(ds[vname_u].dims), data_u, ds[vname_u].attrs)
        data_vars[vname_v] = xr.Variable(_drop_facedim(ds[vname_v].dims), data_v, ds[vname_v].attrs)

    ds_new = ds_new.update(data_vars)
    ds_new = ds_new.set_coords([c for c in coord_vars if c in ds_new])
    return ds_new
# below are data transformers
def _all_facets_to_faces(data_facets, meta):
    """Apply the facets-to-faces transform to every variable in the dict."""
    result = {}
    for vname, facets in data_facets.items():
        result[vname] = _facets_to_faces(facets)
    return result
def _all_facets_to_latlon(data_facets, meta):
    """Transform a dict of per-variable facet data into latlon arrays.

    Vector mates (identified via ``meta[...]['attrs']['mate']``) are
    rotated as pairs; everything else is treated as a scalar.
    """
    vector_pairs = []
    scalars = []
    vnames = list(data_facets)
    for vname in vnames:
        try:
            mate = meta[vname]['attrs']['mate']
            vector_pairs.append((vname, mate))
            # NOTE(review): removes from the list being iterated; appears
            # to rely on the mate occurring after its partner — confirm.
            vnames.remove(mate)
        except KeyError:
            pass

    all_vector_components = [inner for outer in vector_pairs for inner in outer]
    scalars = [vname for vname in vnames if vname not in all_vector_components]
    data = {}
    for vname in scalars:
        data[vname] = _facets_to_latlon_scalar(data_facets[vname])
    for vname_u, vname_v in vector_pairs:
        data_u, data_v = _facets_to_latlon_vector(data_facets[vname_u],
                                                  data_facets[vname_v])
        data[vname_u] = data_u
        data[vname_v] = data_v
    return data
def _chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
def _get_facet_chunk(store, varname, iternum, nfacet, klevels, nx, nz, dtype,
                     mask_override):
    """Read one facet's data for the selected k levels from the store.

    Handles both shrunk (index/mask compressed) and plain uncompressed
    files, and returns an array shaped like the facet with singleton
    time (data vars only) and a k axis.
    """
    fs, path = store.get_fs_and_full_path(varname, iternum)
    assert (nfacet >= 0) & (nfacet < _nfacets)
    file = fs.open(path)
    # insert singleton axis for time (if not grid var) and k level
    facet_shape = (1,) + _facet_shape(nfacet, nx)
    facet_shape = (1,) + facet_shape if iternum is not None else facet_shape
    level_data = []

    # the store tells us whether we need a mask or not
    point = _get_variable_point(varname, mask_override)
    if (store.shrunk and iternum is not None) or \
       (store.shrunk_grid and iternum is None):
        index = all_index_data[nx][point]
        zgroup = store.open_mask_group()
        mask = zgroup['mask_' + point].astype('bool')
    else:
        index = None
        mask = None

    for k in klevels:
        assert (k >= 0) & (k < nz)
        # figure out where in the file we have to read to get the data
        # for this level and facet
        # Bug fix: explicit None test — ``if index:`` on an array-like
        # raises "truth value is ambiguous"
        if index is not None:
            i = np.ravel_multi_index((k, nfacet), (nz, _nfacets))
            start = index[i]
            end = index[i + 1]
        else:
            level_start = k * nx**2 * _nfaces
            facet_start, facet_end = _uncompressed_facet_index(nfacet, nx)
            start = level_start + facet_start
            end = level_start + facet_end
        read_offset = start * dtype.itemsize  # in bytes
        read_length = (end - start) * dtype.itemsize  # in bytes
        file.seek(read_offset)
        buffer = file.read(read_length)
        data = np.frombuffer(buffer, dtype=dtype)
        assert len(data) == (end - start)

        # Bug fix: same ambiguous-truth problem as ``index`` above
        if mask is not None:
            mask_level = mask[k]
            mask_facets = _faces_to_facets(mask_level)
            this_mask = mask_facets[nfacet]
            data = _decompress(data, this_mask, dtype)

        # this is the shape this facet is supposed to have
        data.shape = facet_shape
        level_data.append(data)

    return np.concatenate(level_data, axis=1)
def _get_1d_chunk(store, varname, klevels, nz, dtype):
    """for 1D vertical grid variables"""
    fs, path = store.get_fs_and_full_path(varname, None)
    file = fs.open(path)
    # read all levels for 1D variables
    read_length = nz * dtype.itemsize  # all levels in bytes
    buffer = file.read(read_length)
    data = np.frombuffer(buffer, dtype=dtype)
    # now subset down to the requested levels
    return data[klevels]
class BaseLLCModel:
    """Class representing an LLC Model Dataset.

    Parameters
    ----------
    store : llcreader.BaseStore
        The store object where the data can be found
    mask_ds : zarr.Group
        Must contain variables `mask_c`, `mask_w`, `mask_s`

    Attributes
    ----------
    dtype : numpy.dtype
        Datatype of the data in the dataset
    nx : int
        Number of gridpoints per face (e.g. 90, 1080, 4320, etc.)
    nz : int
        Number of vertical gridpoints
    delta_t : float
        Numerical timestep
    time_units : str
        Date unit string, e.g 'seconds since 1948-01-01 12:00:00'
    iter_start : int
        First model iteration number (inclusive; follows python range conventions)
    iter_stop : int
        Final model iteration number (exclusive; follows python range conventions)
    iter_step : int
        Spacing between iterations
    varnames, grid_varnames : list
        List of data variable and grid variable names contained in the dataset
    mask_override : dict
        Override inference of masking variable, e.g. ``{'oceTAUX': 'c'}``
    """

    nface = 13
    dtype = np.dtype('>f4')

    # should be implemented by child classes
    nx = None
    nz = None
    delta_t = None
    time_units = None
    iter_start = None
    iter_stop = None
    iter_step = None
    varnames = []
    grid_varnames = []
    mask_override = {}

    def __init__(self, store):
        """Initialize model

        Parameters
        ----------
        store : llcreader.BaseStore
        mask_ds : zarr.Group
            Must contain variables `mask_c`, `mask_w`, `mask_s`
        """
        self.store = store
        self.shape = (self.nz, self.nface, self.nx, self.nx)
        if self.store.shrunk:
            self.masks = self._get_masks()
            from .shrunk_index import all_index_data
            self.indexes = all_index_data[self.nx]
        else:
            self.masks = None
            self.indexes = None

    def _get_masks(self):
        """Load the wet masks for c/w/s points as facet lists."""
        masks = {}
        zgroup = self.store.open_mask_group()
        for point in ['c', 'w', 's']:
            mask_faces = dsa.from_zarr(zgroup['mask_' + point]).astype('bool')
            masks[point] = _faces_to_facets(mask_faces)
        return masks

    def _get_kp1_levels(self, k_levels):
        """Return the interface (k_p1) levels bracketing ``k_levels``.

        These are used later to derive Zu and Zl from RF.
        """
        # get borders to all k (center) levels: upper neighbor of each
        ku = np.concatenate([k_levels[1:], [k_levels[-1] + 1]])
        kp1 = []
        # (removed unused ``ki`` accumulator and enumerate index)
        for x, y in zip(k_levels, ku):
            if x not in kp1:
                kp1.append(x)
            # contiguous levels share an interface; otherwise close the cell
            kp1.append(y if y - x == 1 else x + 1)
        kp1 = np.array(kp1)
        return kp1

    def _make_coords_faces(self, all_iters):
        """Build the coordinate-only Dataset for the faces layout."""
        time = self.delta_t * all_iters
        # NOTE(review): ``self.calendar`` is not declared on this base
        # class; subclasses presumably provide it — confirm.
        time_attrs = {'units': self.time_units,
                      'calendar': self.calendar}
        coords = {'face': ('face', np.arange(self.nface)),
                  'i': ('i', np.arange(self.nx)),
                  'i_g': ('i_g', np.arange(self.nx)),
                  'j': ('j', np.arange(self.nx)),
                  'j_g': ('j_g', np.arange(self.nx)),
                  'k': ('k', np.arange(self.nz)),
                  'k_u': ('k_u', np.arange(self.nz)),
                  'k_l': ('k_l', np.arange(self.nz)),
                  'k_p1': ('k_p1', np.arange(self.nz + 1)),
                  'niter': ('time', all_iters),
                  'time': ('time', time, time_attrs)
                  }
        return xr.decode_cf(xr.Dataset(coords=coords))

    def _make_coords_latlon(self, all_iters):
        """Build the coordinate-only Dataset for the latlon layout.

        Bug fix: the original definition omitted ``self`` (and passed
        ``self`` as the iterations argument), making it uncallable as a
        method.
        """
        ds = self._make_coords_faces(all_iters)
        return _faces_coords_to_latlon(ds)

    def _get_mask_and_index_for_variable(self, vname):
        """Return (mask, index) for ``vname``, or (None, None) if not a
        masked 2D horizontal variable."""
        if self.masks is None:
            return None, None
        dims = _VAR_METADATA[vname]['dims']
        if 'i' in dims and 'j' in dims:
            point = 'c'
        elif 'i_g' in dims and 'j' in dims:
            point = 'w'
        elif 'i' in dims and 'j_g' in dims:
            point = 's'
        elif 'i_g' in dims and 'j_g' in dims:
            raise ValueError("Don't have masks for corner points!")
        else:
            # this is not a 2D variable
            return None, None
        mask = self.masks[point]
        index = self.indexes[point]
        return mask, index

    def _dask_array(self, nfacet, varname, iters, klevels, k_chunksize):
        """Return a dask array for a single facet of ``varname``."""
        facet_shape = _facet_shape(nfacet, self.nx)
        time_chunks = (len(iters) * (1,),) if iters is not None else ()
        k_chunks = (tuple([len(c)
                           for c in _chunks(klevels, k_chunksize)]),)
        chunks = time_chunks + k_chunks + tuple([(s,) for s in facet_shape])

        # manually build dask graph
        dsk = {}
        token = tokenize(varname, self.store, nfacet)
        name = '-'.join([varname, token])

        # iters == None for grid variables
        if iters is not None:
            for n_iter, iternum in enumerate(iters):
                for n_k, these_klevels in enumerate(_chunks(klevels, k_chunksize)):
                    key = name, n_iter, n_k, 0, 0, 0
                    task = (_get_facet_chunk, self.store, varname, iternum,
                            nfacet, these_klevels, self.nx, self.nz, self.dtype,
                            self.mask_override)
                    dsk[key] = task
        else:
            for n_k, these_klevels in enumerate(_chunks(klevels, k_chunksize)):
                key = name, n_k, 0, 0, 0
                task = (_get_facet_chunk, self.store, varname, None,
                        nfacet, these_klevels, self.nx, self.nz, self.dtype,
                        self.mask_override)
                dsk[key] = task

        return dsa.Array(dsk, name, chunks, self.dtype)

    def _dask_array_vgrid(self, varname, klevels, k_chunksize):
        """Return a dask array for a 1D vertical grid variable."""
        # single chunk for 1D variables
        chunks = ((len(klevels),),)
        # manually build dask graph
        dsk = {}
        token = tokenize(varname, self.store)
        name = '-'.join([varname, token])

        # interface variables (dims == ['k_p1']) carry one extra level
        nz = self.nz if _VAR_METADATA[varname]['dims'] != ['k_p1'] else self.nz + 1
        task = (_get_1d_chunk, self.store, varname,
                list(klevels), nz, self.dtype)

        key = name, 0
        dsk[key] = task

        return dsa.Array(dsk, name, chunks, self.dtype)

    def _get_facet_data(self, varname, iters, klevels, k_chunksize):
        """Return ``varname`` as a list of facet dask arrays (or a single
        1D array for vertical grid variables)."""
        mask, index = self._get_mask_and_index_for_variable(varname)
        # needs facets to be outer index of nested lists
        dims = _VAR_METADATA[varname]['dims']

        if len(dims) == 2:
            klevels = [0, ]

        if _is_vgrid(varname):
            data_facets = self._dask_array_vgrid(varname, klevels, k_chunksize)
        else:
            data_facets = [self._dask_array(nfacet, varname, iters, klevels, k_chunksize)
                           for nfacet in range(5)]

        if len(dims) == 2:
            # squeeze depth dimension out of 2D variable
            data_facets = [facet[..., 0, :, :, :] for facet in data_facets]

        return data_facets

    def get_dataset(self, varnames=None, iter_start=None, iter_stop=None,
                    iter_step=None, k_levels=None, k_chunksize=1,
                    type='faces', read_grid=True, grid_vars_to_coords=True):
        """
        Create an xarray Dataset object for this model.

        Parameters
        ----------
        *varnames : list of strings, optional
            The variables to include, e.g. ``['Salt', 'Theta']``. Otherwise
            include all known variables.
        iter_start : int, optional
            Starting iteration number. Otherwise use model default.
            Follows standard `range` conventions. (inclusive)
        iter_stop : int, optional
            Stopping iteration number. Otherwise use model default.
            Follows standard `range` conventions. (exclusive)
        iter_step : int, optional
            Iteration number stepsize. Otherwise use model default.
        k_levels : list of ints, optional
            Vertical levels to extract. Default is to get them all
        k_chunksize : int, optional
            How many vertical levels per Dask chunk.
        type : {'faces', 'latlon'}, optional
            What type of dataset to create
        read_grid : bool, optional
            Whether to read the grid info
        grid_vars_to_coords : bool, optional
            Whether to promote grid variables to coordinate status

        Returns
        -------
        ds : xarray.Dataset
        """

        def _if_not_none(a, b):
            if a is None:
                return b
            else:
                return a

        iter_start = _if_not_none(iter_start, self.iter_start)
        iter_stop = _if_not_none(iter_stop, self.iter_stop)
        iter_step = _if_not_none(iter_step, self.iter_step)
        iter_params = [iter_start, iter_stop, iter_step]
        if any([a is None for a in iter_params]):
            raise ValueError("The parameters `iter_start`, `iter_stop` "
                             "and `iter_step` must be defined either by the "
                             "model class or as argument. Instead got %r "
                             % iter_params)
        iters = np.arange(*iter_params)

        varnames = varnames or self.varnames

        # grid stuff
        read_grid = read_grid and len(self.grid_varnames) != 0
        grid_vars_to_coords = grid_vars_to_coords and read_grid
        grid_varnames = self.grid_varnames if read_grid else []

        ds = self._make_coords_faces(iters)
        if type == 'latlon':
            ds = _faces_coords_to_latlon(ds)

        k_levels = k_levels or np.arange(self.nz)
        kp1_levels = self._get_kp1_levels(k_levels)

        ds = ds.sel(k=k_levels, k_l=k_levels, k_u=k_levels, k_p1=kp1_levels)

        # get the data in facet form
        data_facets = {vname:
                       self._get_facet_data(vname, iters, k_levels, k_chunksize)
                       for vname in varnames}

        # get the grid in facet form
        # do separately for vertical coords on kp1_levels
        grid_facets = {}
        for vname in grid_varnames:
            my_k_levels = k_levels if _VAR_METADATA[vname]['dims'] != ['k_p1'] else kp1_levels
            grid_facets[vname] = self._get_facet_data(vname, None, my_k_levels, k_chunksize)

        # transform it into faces or latlon
        data_transformers = {'faces': _all_facets_to_faces,
                             'latlon': _all_facets_to_latlon}
        transformer = data_transformers[type]
        data = transformer(data_facets, _VAR_METADATA)

        # separate horizontal and vertical grid variables
        hgrid_facets = {key: grid_facets[key]
                        for key in grid_varnames if not _is_vgrid(key)}
        vgrid_facets = {key: grid_facets[key]
                        for key in grid_varnames if _is_vgrid(key)}

        # do not transform vertical grid variables
        data.update(transformer(hgrid_facets, _VAR_METADATA))
        data.update(vgrid_facets)

        variables = {}
        gridlist = ['Zl', 'Zu'] if read_grid else []
        for vname in varnames + grid_varnames:
            meta = _VAR_METADATA[vname]
            dims = meta['dims']
            if type == 'faces':
                dims = _add_face_to_dims(dims)
            # grid variables have no time axis
            dims = ['time', ] + dims if vname not in grid_varnames else dims
            attrs = meta['attrs']
            # Handle grid names different from filenames
            fname = vname
            vname = meta['real_name'] if 'real_name' in meta else vname
            if fname in grid_varnames:
                gridlist.append(vname)
            variables[vname] = xr.Variable(dims, data[fname], attrs)

        # handle vertical coordinate after the fact: Zl/Zu are slices of RF
        if read_grid and 'RF' in grid_varnames:
            ki = np.array([list(kp1_levels).index(x) for x in k_levels])
            for zv, sl in zip(['Zl', 'Zu'], [ki, ki + 1]):
                variables[zv] = xr.Variable(_VAR_METADATA[zv]['dims'],
                                            data['RF'][sl],
                                            _VAR_METADATA[zv]['attrs'])

        ds = ds.update(variables)

        if grid_vars_to_coords:
            ds = ds.set_coords(gridlist)

        return ds
|
|
import pytest
from pytestqt.qt_compat import qt_api
from pytestqt import modeltest
# Every test in this module needs a QApplication, provided via qtbot.
pytestmark = pytest.mark.usefixtures("qtbot")
class BasicModel(qt_api.QtCore.QAbstractItemModel):
    """Minimal empty item model, used as a base for broken-model tests."""

    def data(self, index, role=qt_api.QtCore.Qt.DisplayRole):
        return None

    def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
        return 0

    def columnCount(self, parent=qt_api.QtCore.QModelIndex()):
        return 0

    def index(self, row, column, parent=qt_api.QtCore.QModelIndex()):
        return qt_api.QtCore.QModelIndex()

    def parent(self, index):
        return qt_api.QtCore.QModelIndex()
def test_standard_item_model(qtmodeltester):
    """
    Basic test which uses qtmodeltester with a qt_api.QStandardItemModel.
    """
    model = qt_api.QStandardItemModel()
    items = [qt_api.QStandardItem(str(i)) for i in range(6)]
    # 2x2 top-level grid
    model.setItem(0, 0, items[0])
    model.setItem(0, 1, items[1])
    model.setItem(1, 0, items[2])
    model.setItem(1, 1, items[3])
    # two levels of nesting under the first item
    items[0].setChild(0, items[4])
    items[4].setChild(0, items[5])
    qtmodeltester.check(model, force_py=True)
def test_string_list_model(qtmodeltester):
    """The model tester must accept a plain QStringListModel."""
    strings = ["hello", "world"]
    model = qt_api.QStringListModel()
    model.setStringList(strings)
    qtmodeltester.check(model, force_py=True)
def test_sort_filter_proxy_model(qtmodeltester):
    """The model tester must accept a proxy wrapping a string-list model."""
    source = qt_api.QStringListModel()
    source.setStringList(["hello", "world"])
    proxy = qt_api.QSortFilterProxyModel()
    proxy.setSourceModel(source)
    qtmodeltester.check(proxy, force_py=True)
@pytest.mark.parametrize(
    "broken_role",
    [
        qt_api.QtCore.Qt.ToolTipRole,
        qt_api.QtCore.Qt.StatusTipRole,
        qt_api.QtCore.Qt.WhatsThisRole,
        qt_api.QtCore.Qt.SizeHintRole,
        qt_api.QtCore.Qt.FontRole,
        qt_api.QtCore.Qt.BackgroundColorRole,
        qt_api.QtCore.Qt.TextColorRole,
        qt_api.QtCore.Qt.TextAlignmentRole,
        qt_api.QtCore.Qt.CheckStateRole,
    ],
)
def test_broken_types(check_model, broken_role):
    """
    Check that qtmodeltester correctly captures data() returning invalid
    values for various display roles.
    """

    class BrokenTypeModel(qt_api.QAbstractListModel):
        # single row at the top level, no children
        def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
            if parent == qt_api.QtCore.QModelIndex():
                return 1
            else:
                return 0

        def data(
            self, index=qt_api.QtCore.QModelIndex(), role=qt_api.QtCore.Qt.DisplayRole
        ):
            if role == broken_role:
                return object()  # This will fail the type check for any role
            else:
                return None

    check_model(BrokenTypeModel(), should_pass=False)
@pytest.mark.parametrize(
    "role_value, should_pass",
    [
        (qt_api.QtCore.Qt.AlignLeft, True),
        (qt_api.QtCore.Qt.AlignRight, True),
        (0xFFFFFF, False),
        ("foo", False),
        (object(), False),
    ],
)
def test_data_alignment(role_value, should_pass, check_model):
    """Test a custom model which returns both good and bad alignment
    values from data(); qtmodeltester should fail on the bad ones.
    """

    class MyModel(qt_api.QAbstractListModel):
        # single row at the top level, no children
        def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
            return 1 if parent == qt_api.QtCore.QModelIndex() else 0

        def data(
            self, index=qt_api.QtCore.QModelIndex(), role=qt_api.QtCore.Qt.DisplayRole
        ):
            if role == qt_api.QtCore.Qt.TextAlignmentRole:
                # parametrized value under test
                return role_value
            elif role == qt_api.QtCore.Qt.DisplayRole:
                if index == self.index(0, 0):
                    return "Hello"
            return None

    check_model(MyModel(), should_pass=should_pass)
def test_header_handling(check_model):
    """Changing header data after a check must not break the tester."""

    class MyModel(qt_api.QAbstractListModel):
        def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
            return 1 if parent == qt_api.QtCore.QModelIndex() else 0

        def set_header_text(self, header):
            # store the text and notify both orientations of the change
            self._header_text = header
            self.headerDataChanged.emit(qt_api.QtCore.Qt.Vertical, 0, 0)
            self.headerDataChanged.emit(qt_api.QtCore.Qt.Horizontal, 0, 0)

        def headerData(self, section, orientation, role=qt_api.QtCore.Qt.DisplayRole):
            return self._header_text

        def data(
            self, index=qt_api.QtCore.QModelIndex(), role=qt_api.QtCore.Qt.DisplayRole
        ):
            if role == qt_api.QtCore.Qt.DisplayRole and index == self.index(0, 0):
                return "Contents"
            return None

    model = MyModel()
    model.set_header_text("Start Header")
    check_model(model, should_pass=True)
    # mutate the header after the check has run
    model.set_header_text("New Header")
@pytest.fixture
def check_model(qtmodeltester):
    """
    Provide a ``check(model, should_pass=True)`` helper.

    When ``should_pass`` is True the model is expected to satisfy
    qtmodeltester; otherwise the check is expected to raise AssertionError.
    """
    def check(model, should_pass=True):
        if not should_pass:
            with pytest.raises(AssertionError):
                qtmodeltester.check(model, force_py=True)
        else:
            qtmodeltester.check(model, force_py=True)
    return check
def test_invalid_column_count(qtmodeltester):
    """Basic check with an invalid model (negative column count)."""
    class BadColumnModel(BasicModel):
        def columnCount(self, parent=qt_api.QtCore.QModelIndex()):
            return -1

    with pytest.raises(AssertionError):
        qtmodeltester.check(BadColumnModel(), force_py=True)
def test_changing_model_insert(qtmodeltester):
    """Inserting a row after the check must not break the tester."""
    model = qt_api.QStandardItemModel()
    qtmodeltester.check(model, force_py=True)
    model.insertRow(0, qt_api.QStandardItem("foo"))
def test_changing_model_remove(qtmodeltester):
    """Removing a row after the check must not break the tester."""
    model = qt_api.QStandardItemModel()
    model.setItem(0, 0, qt_api.QStandardItem("foo"))
    qtmodeltester.check(model, force_py=True)
    model.removeRow(0)
def test_changing_model_data(qtmodeltester):
    """Changing item data after the check must not break the tester."""
    model = qt_api.QStandardItemModel()
    model.setItem(0, 0, qt_api.QStandardItem("foo"))
    qtmodeltester.check(model, force_py=True)
    model.setData(model.index(0, 0), "hello world")
@pytest.mark.parametrize(
    "orientation", [qt_api.QtCore.Qt.Horizontal, qt_api.QtCore.Qt.Vertical]
)
def test_changing_model_header_data(qtmodeltester, orientation):
    """Setting header data after the check must not break the tester."""
    model = qt_api.QStandardItemModel()
    model.setItem(0, 0, qt_api.QStandardItem("foo"))
    qtmodeltester.check(model, force_py=True)
    model.setHeaderData(0, orientation, "blah")
def test_changing_model_sort(qtmodeltester):
    """Sorting after the check emits layoutChanged; the tester must cope."""
    model = qt_api.QStandardItemModel()
    model.setItem(0, 0, qt_api.QStandardItem("foo"))
    qtmodeltester.check(model, force_py=True)
    model.sort(0)
def test_nop(qtmodeltester):
    """We should not get a crash on cleanup with no model.

    The fixture is requested but ``check()`` is never called.
    """
    pass
def test_overridden_methods(qtmodeltester):
    """Make sure overriden methods of a model are actually run.

    With a previous implementation of the modeltester using sip.cast, the
    custom implementations did never actually run.
    """
    class TrackingModel(BasicModel):
        def __init__(self, parent=None):
            super().__init__(parent)
            self.row_count_did_run = False

        def rowCount(self, parent=None):
            self.row_count_did_run = True
            return 0

    tracking = TrackingModel()
    assert not tracking.row_count_did_run
    qtmodeltester.check(tracking, force_py=True)
    assert tracking.row_count_did_run
def test_fetch_more(qtmodeltester):
    """Check a model whose fetchMore() mutates data during the check."""
    class FetchingModel(qt_api.QStandardItemModel):
        def canFetchMore(self, parent):
            return True

        def fetchMore(self, parent):
            """Force a re-check while fetching more."""
            self.setData(self.index(0, 0), "bar")

    model = FetchingModel()
    model.setItem(0, 0, qt_api.QStandardItem("foo"))
    qtmodeltester.check(model, force_py=True)
def test_invalid_parent(qtmodeltester):
    """A model whose parent() is inconsistent must fail the check.

    Only the first child index gets a real parent below; every other index
    reports an invalid QModelIndex, which is correct for the top-level item
    but wrong for the grandchild.
    """
    class Model(qt_api.QStandardItemModel):
        def parent(self, index):
            if index == self.index(0, 0, parent=self.index(0, 0)):
                return self.index(0, 0)
            else:
                return qt_api.QtCore.QModelIndex()
    model = Model()
    item = qt_api.QStandardItem("foo")
    item2 = qt_api.QStandardItem("bar")
    item3 = qt_api.QStandardItem("bar")
    model.setItem(0, 0, item)
    item.setChild(0, item2)
    item2.setChild(0, item3)
    with pytest.raises(AssertionError):
        qtmodeltester.check(model, force_py=True)
@pytest.mark.skipif(not modeltest.HAS_QT_TESTER, reason="No Qt modeltester available")
def test_qt_tester_valid(testdir):
    """Run a generated test module that checks a valid model with the
    native Qt modeltester and expect one passing test."""
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        from pytestqt import modeltest
        assert modeltest.HAS_QT_TESTER
        def test_ok(qtmodeltester):
            model = qt_api.QStandardItemModel()
            qtmodeltester.check(model)
        """
    )
    res = testdir.inline_run()
    res.assertoutcome(passed=1, failed=0)
@pytest.mark.skipif(not modeltest.HAS_QT_TESTER, reason="No Qt modeltester available")
def test_qt_tester_invalid(testdir):
    """Run a generated test module with an invalid model (negative column
    count) against the native Qt modeltester and match the captured
    failure output."""
    testdir.makeini(
        """
        [pytest]
        qt_log_level_fail = NO
        """
    )
    testdir.makepyfile(
        """
        from pytestqt.qt_compat import qt_api
        from pytestqt import modeltest
        assert modeltest.HAS_QT_TESTER
        class Model(qt_api.QtCore.QAbstractItemModel):
            def data(self, index, role=qt_api.QtCore.Qt.DisplayRole):
                return None
            def rowCount(self, parent=qt_api.QtCore.QModelIndex()):
                return 0
            def columnCount(self, parent=qt_api.QtCore.QModelIndex()):
                return -1
            def index(self, row, column, parent=qt_api.QtCore.QModelIndex()):
                return qt_api.QtCore.QModelIndex()
            def parent(self, index):
                return qt_api.QtCore.QModelIndex()
        def test_ok(qtmodeltester):
            model = Model()
            qtmodeltester.check(model)
        """
    )
    res = testdir.runpytest()
    res.stdout.fnmatch_lines(
        [
            "*__ test_ok __*",
            "test_qt_tester_invalid.py:*: Qt modeltester errors",
            "*-- Captured Qt messages --*",
            "* QtWarningMsg: FAIL! model->columnCount(QModelIndex()) >= 0 () returned FALSE "
            "(*qabstractitemmodeltester.cpp:*)",
            "*-- Captured stdout call --*",
            "modeltest: Using Qt C++ tester",
            "*== 1 failed in * ==*",
        ]
    )
|
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import math
import time
import shutil
import uuid
import errno
import re
from contextlib import contextmanager
from swift import gettext_ as _
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import subprocess
import swift.common.db
from swift.common.direct_client import quote
from swift.common.utils import get_logger, whataremyips, storage_directory, \
renamer, mkdirs, lock_parent_directory, config_true_value, \
unlink_older_than, dump_recon_cache, rsync_ip, ismount, json, Timestamp
from swift.common import ring
from swift.common.ring.utils import is_local_device
from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE
from swift.common.bufferedhttp import BufferedHTTPConnection
from swift.common.exceptions import DriveNotMounted
from swift.common.daemon import Daemon
from swift.common.swob import Response, HTTPNotFound, HTTPNoContent, \
HTTPAccepted, HTTPBadRequest
DEBUG_TIMINGS_THRESHOLD = 10
def quarantine_db(object_file, server_type):
    """
    In the case that a corrupt file is found, move it to a quarantined area
    to allow replication to fix it.

    :param object_file: path to corrupt file
    :param server_type: type of file that is corrupt
                        ('container' or 'account')
    """
    src_dir = os.path.dirname(object_file)
    dest_dir = os.path.abspath(
        os.path.join(src_dir, '..', '..', '..', '..', 'quarantined',
                     server_type + 's', os.path.basename(src_dir)))
    try:
        renamer(src_dir, dest_dir, fsync=False)
    except OSError as e:
        if e.errno not in (errno.EEXIST, errno.ENOTEMPTY):
            raise
        # Quarantine target already exists; retry with a unique suffix.
        dest_dir = "%s-%s" % (dest_dir, uuid.uuid4().hex)
        renamer(src_dir, dest_dir, fsync=False)
def roundrobin_datadirs(datadirs):
    """
    Generator to walk the data dirs in a round robin manner, evenly
    hitting each device on the system, and yielding any .db files
    found (in their proper places). The partitions within each data
    dir are walked randomly, however.

    :param datadirs: a list of (path, node_id) to walk
    :returns: A generator of (partition, path_to_db_file, node_id)
    """
    def walk_datadir(datadir, node_id):
        partitions = os.listdir(datadir)
        random.shuffle(partitions)
        for partition in partitions:
            part_dir = os.path.join(datadir, partition)
            if not os.path.isdir(part_dir):
                continue
            suffixes = os.listdir(part_dir)
            if not suffixes:
                # Clean up empty partition directories as we go.
                os.rmdir(part_dir)
            for suffix in suffixes:
                suff_dir = os.path.join(part_dir, suffix)
                if not os.path.isdir(suff_dir):
                    continue
                hashes = os.listdir(suff_dir)
                for hsh in hashes:
                    hash_dir = os.path.join(suff_dir, hsh)
                    if not os.path.isdir(hash_dir):
                        continue
                    object_file = os.path.join(hash_dir, hsh + '.db')
                    if os.path.exists(object_file):
                        yield (partition, object_file, node_id)

    its = [walk_datadir(datadir, node_id) for datadir, node_id in datadirs]
    while its:
        # Iterate over a snapshot: exhausted iterators are removed from
        # ``its`` inside the loop, and removing from the list being
        # iterated would silently skip the next iterator in this round.
        for it in list(its):
            try:
                # builtin next() works on both Py2 and Py3 iterators;
                # the previous it.next() call was Python-2-only.
                yield next(it)
            except StopIteration:
                its.remove(it)
class ReplConnection(BufferedHTTPConnection):
    """
    Helper to simplify REPLICATEing to a remote server.
    """
    def __init__(self, node, partition, hash_, logger):
        """
        :param node: node dictionary from the ring; replication ip/port and
                     device are read from it
        :param partition: partition number used in the request path
        :param hash_: hash component placed in the request path
        :param logger: logger used to report request errors
        """
        self.logger = logger
        self.node = node
        host = "%s:%s" % (node['replication_ip'], node['replication_port'])
        BufferedHTTPConnection.__init__(self, host)
        self.path = '/%s/%s/%s' % (node['device'], partition, hash_)
    def replicate(self, *args):
        """
        Make an HTTP REPLICATE request

        :param args: list of json-encodable objects
        :returns: bufferedhttp response object, or None on any error
        """
        try:
            body = json.dumps(args)
            self.request('REPLICATE', self.path, body,
                         {'Content-Type': 'application/json'})
            response = self.getresponse()
            # Read the body eagerly so callers can use response.data.
            response.data = response.read()
            return response
        except (Exception, Timeout):
            self.logger.exception(
                _('ERROR reading HTTP response from %s'), self.node)
            return None
class Replicator(Daemon):
    """
    Implements the logic for directing db replication.

    Subclasses provide ``server_type``, ``brokerclass``, ``datadir`` and
    ``default_port`` (see the account/container replicators).
    """

    def __init__(self, conf, logger=None):
        """
        :param conf: configuration dict (usually parsed from a conf file)
        :param logger: optional logger; one is created from conf if omitted
        """
        self.conf = conf
        self.logger = logger or get_logger(conf, log_route='replicator')
        self.root = conf.get('devices', '/srv/node')
        self.mount_check = config_true_value(conf.get('mount_check', 'true'))
        self.port = int(conf.get('bind_port', self.default_port))
        concurrency = int(conf.get('concurrency', 8))
        self.cpool = GreenPool(size=concurrency)
        swift_dir = conf.get('swift_dir', '/etc/swift')
        self.ring = ring.Ring(swift_dir, ring_name=self.server_type)
        self._local_device_ids = set()
        self.per_diff = int(conf.get('per_diff', 1000))
        self.max_diffs = int(conf.get('max_diffs') or 100)
        # 'run_pause' is the legacy name for 'interval'.
        self.interval = int(conf.get('interval') or
                            conf.get('run_pause') or 30)
        self.vm_test_mode = config_true_value(conf.get('vm_test_mode', 'no'))
        self.node_timeout = int(conf.get('node_timeout', 10))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.rsync_compress = config_true_value(
            conf.get('rsync_compress', 'no'))
        self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7))
        swift.common.db.DB_PREALLOCATION = \
            config_true_value(conf.get('db_preallocation', 'f'))
        self._zero_stats()
        self.recon_cache_path = conf.get('recon_cache_path',
                                         '/var/cache/swift')
        self.recon_replicator = '%s.recon' % self.server_type
        self.rcache = os.path.join(self.recon_cache_path,
                                   self.recon_replicator)
        # Matches the first path component after self.root (the device name).
        self.extract_device_re = re.compile('%s%s([^%s]+)' % (
            self.root, os.path.sep, os.path.sep))

    def _zero_stats(self):
        """Zero out the stats."""
        self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0,
                      'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0,
                      'remove': 0, 'empty': 0, 'remote_merge': 0,
                      'start': time.time(), 'diff_capped': 0}

    def _report_stats(self):
        """Report the current stats to the logs."""
        now = time.time()
        self.logger.info(
            _('Attempted to replicate %(count)d dbs in %(time).5f seconds '
              '(%(rate).5f/s)'),
            {'count': self.stats['attempted'],
             'time': now - self.stats['start'],
             'rate': self.stats['attempted'] /
                (now - self.stats['start'] + 0.0000001)})
        self.logger.info(_('Removed %(remove)d dbs') % self.stats)
        self.logger.info(_('%(success)s successes, %(failure)s failures')
                         % self.stats)
        dump_recon_cache(
            {'replication_stats': self.stats,
             'replication_time': now - self.stats['start'],
             'replication_last': now},
            self.rcache, self.logger)
        self.logger.info(' '.join(['%s:%s' % item for item in
                                   self.stats.items() if item[0] in
                                   ('no_change', 'hashmatch', 'rsync', 'diff',
                                    'ts_repl', 'empty', 'diff_capped')]))

    def _rsync_file(self, db_file, remote_file, whole_file=True,
                    different_region=False):
        """
        Sync a single file using rsync. Used by _rsync_db to handle syncing.

        :param db_file: file to be synced
        :param remote_file: remote location to sync the DB file to
        :param whole-file: if True, uses rsync's --whole-file flag
        :param different_region: if True, the destination node is in a
                                 different region

        :returns: True if the sync was successful, False otherwise
        """
        popen_args = ['rsync', '--quiet', '--no-motd',
                      '--timeout=%s' % int(math.ceil(self.node_timeout)),
                      '--contimeout=%s' % int(math.ceil(self.conn_timeout))]
        if whole_file:
            popen_args.append('--whole-file')
        if self.rsync_compress and different_region:
            # Allow for compression, but only if the remote node is in
            # a different region than the local one.
            popen_args.append('--compress')
        popen_args.extend([db_file, remote_file])
        proc = subprocess.Popen(popen_args)
        proc.communicate()
        if proc.returncode != 0:
            self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'),
                              {'code': proc.returncode, 'args': popen_args})
        return proc.returncode == 0

    def _rsync_db(self, broker, device, http, local_id,
                  replicate_method='complete_rsync', replicate_timeout=None,
                  different_region=False):
        """
        Sync a whole db using rsync.

        :param broker: DB broker object of DB to be synced
        :param device: device to sync to
        :param http: ReplConnection object
        :param local_id: unique ID of the local database replica
        :param replicate_method: remote operation to perform after rsync
        :param replicate_timeout: timeout to wait in seconds
        :param different_region: if True, the destination node is in a
                                 different region
        """
        device_ip = rsync_ip(device['replication_ip'])
        if self.vm_test_mode:
            remote_file = '%s::%s%s/%s/tmp/%s' % (
                device_ip, self.server_type, device['replication_port'],
                device['device'], local_id)
        else:
            remote_file = '%s::%s/%s/tmp/%s' % (
                device_ip, self.server_type, device['device'], local_id)
        mtime = os.path.getmtime(broker.db_file)
        if not self._rsync_file(broker.db_file, remote_file,
                                different_region=different_region):
            return False
        # perform block-level sync if the db was modified during the first
        # sync
        if os.path.exists(broker.db_file + '-journal') or \
                os.path.getmtime(broker.db_file) > mtime:
            # grab a lock so nobody else can modify it
            with broker.lock():
                if not self._rsync_file(broker.db_file, remote_file,
                                        whole_file=False,
                                        different_region=different_region):
                    return False
        with Timeout(replicate_timeout or self.node_timeout):
            response = http.replicate(replicate_method, local_id)
        return response and response.status >= 200 and response.status < 300

    def _usync_db(self, point, broker, http, remote_id, local_id):
        """
        Sync a db by sending all records since the last sync.

        :param point: synchronization high water mark between the replicas
        :param broker: database broker object
        :param http: ReplConnection object for the remote server
        :param remote_id: database id for the remote replica
        :param local_id: database id for the local replica

        :returns: boolean indicating completion and success
        """
        self.stats['diff'] += 1
        self.logger.increment('diffs')
        self.logger.debug('Syncing chunks with %s, starting at %s',
                          http.host, point)
        sync_table = broker.get_syncs()
        objects = broker.get_items_since(point, self.per_diff)
        diffs = 0
        while len(objects) and diffs < self.max_diffs:
            diffs += 1
            with Timeout(self.node_timeout):
                response = http.replicate('merge_items', objects, local_id)
            if not response or response.status >= 300 or response.status < 200:
                if response:
                    self.logger.error(_('ERROR Bad response %(status)s from '
                                        '%(host)s'),
                                      {'status': response.status,
                                       'host': http.host})
                return False
            # replication relies on db order to send the next merge batch in
            # order with no gaps
            point = objects[-1]['ROWID']
            objects = broker.get_items_since(point, self.per_diff)
        if objects:
            self.logger.debug(
                'Synchronization for %s has fallen more than '
                '%s rows behind; moving on and will try again next pass.',
                broker, self.max_diffs * self.per_diff)
            self.stats['diff_capped'] += 1
            self.logger.increment('diff_caps')
        else:
            with Timeout(self.node_timeout):
                response = http.replicate('merge_syncs', sync_table)
            if response and response.status >= 200 and response.status < 300:
                broker.merge_syncs([{'remote_id': remote_id,
                                     'sync_point': point}],
                                   incoming=False)
                return True
        return False

    def _in_sync(self, rinfo, info, broker, local_sync):
        """
        Determine whether or not two replicas of a databases are considered
        to be in sync.

        :param rinfo: remote database info
        :param info: local database info
        :param broker: database broker object
        :param local_sync: cached last sync point between replicas

        :returns: boolean indicating whether or not the replicas are in sync
        """
        if max(rinfo['point'], local_sync) >= info['max_row']:
            self.stats['no_change'] += 1
            self.logger.increment('no_changes')
            return True
        if rinfo['hash'] == info['hash']:
            self.stats['hashmatch'] += 1
            self.logger.increment('hashmatches')
            broker.merge_syncs([{'remote_id': rinfo['id'],
                                 'sync_point': rinfo['point']}],
                               incoming=False)
            return True

    def _http_connect(self, node, partition, db_file):
        """
        Make an http_connection using ReplConnection

        :param node: node dictionary from the ring
        :param partition: partition partition to send in the url
        :param db_file: DB file

        :returns: ReplConnection object
        """
        return ReplConnection(node, partition,
                              os.path.basename(db_file).split('.', 1)[0],
                              self.logger)

    def _gather_sync_args(self, info):
        """
        Convert local replication_info to sync args tuple.
        """
        sync_args_order = ('max_row', 'hash', 'id', 'created_at',
                           'put_timestamp', 'delete_timestamp', 'metadata')
        return tuple(info[key] for key in sync_args_order)

    def _repl_to_node(self, node, broker, partition, info,
                      different_region=False):
        """
        Replicate a database to a node.

        :param node: node dictionary from the ring to be replicated to
        :param broker: DB broker for the DB to be replication
        :param partition: partition on the node to replicate to
        :param info: DB info as a dictionary of {'max_row', 'hash', 'id',
                     'created_at', 'put_timestamp', 'delete_timestamp',
                     'metadata'}
        :param different_region: if True, the destination node is in a
                                 different region

        :returns: True if successful, False otherwise
        """
        http = self._http_connect(node, partition, broker.db_file)
        sync_args = self._gather_sync_args(info)
        with Timeout(self.node_timeout):
            response = http.replicate('sync', *sync_args)
        if not response:
            return False
        return self._handle_sync_response(node, response, info, broker, http,
                                          different_region=different_region)

    def _handle_sync_response(self, node, response, info, broker, http,
                              different_region=False):
        """
        Dispatch on the response to a REPLICATE 'sync': rsync a completely
        missing db, raise DriveNotMounted on 507, or pick between a
        remote-merge rsync and a usync based on how far apart the replicas
        are.
        """
        if response.status == HTTP_NOT_FOUND:  # completely missing, rsync
            self.stats['rsync'] += 1
            self.logger.increment('rsyncs')
            return self._rsync_db(broker, node, http, info['id'],
                                  different_region=different_region)
        elif response.status == HTTP_INSUFFICIENT_STORAGE:
            raise DriveNotMounted()
        elif response.status >= 200 and response.status < 300:
            rinfo = json.loads(response.data)
            local_sync = broker.get_sync(rinfo['id'], incoming=False)
            if self._in_sync(rinfo, info, broker, local_sync):
                return True
            # if the difference in rowids between the two differs by
            # more than 50%, rsync then do a remote merge.
            if rinfo['max_row'] / float(info['max_row']) < 0.5:
                self.stats['remote_merge'] += 1
                self.logger.increment('remote_merges')
                return self._rsync_db(broker, node, http, info['id'],
                                      replicate_method='rsync_then_merge',
                                      replicate_timeout=(info['count'] / 2000),
                                      different_region=different_region)
            # else send diffs over to the remote server
            return self._usync_db(max(rinfo['point'], local_sync),
                                  broker, http, rinfo['id'], info['id'])

    def _post_replicate_hook(self, broker, info, responses):
        """
        :param broker: the container that just replicated
        :param info: pre-replication full info dict
        :param responses: a list of bools indicating success from nodes
        """
        pass

    def _replicate_object(self, partition, object_file, node_id):
        """
        Replicate the db, choosing method based on whether or not it
        already exists on peers.

        :param partition: partition to be replicated to
        :param object_file: DB file name to be replicated
        :param node_id: node id of the node to be replicated to
        """
        start_time = now = time.time()
        self.logger.debug('Replicating db %s', object_file)
        self.stats['attempted'] += 1
        self.logger.increment('attempts')
        shouldbehere = True
        try:
            broker = self.brokerclass(object_file, pending_timeout=30)
            broker.reclaim(now - self.reclaim_age,
                           now - (self.reclaim_age * 2))
            info = broker.get_replication_info()
            bpart = self.ring.get_part(
                info['account'], info.get('container'))
            if bpart != int(partition):
                partition = bpart
                # Important to set this false here since the later check only
                # checks if it's on the proper device, not partition.
                shouldbehere = False
                name = '/' + quote(info['account'])
                if 'container' in info:
                    name += '/' + quote(info['container'])
                self.logger.error(
                    'Found %s for %s when it should be on partition %s; will '
                    'replicate out and remove.' % (object_file, name, bpart))
        except (Exception, Timeout) as e:
            if 'no such table' in str(e):
                self.logger.error(_('Quarantining DB %s'), object_file)
                quarantine_db(broker.db_file, broker.db_type)
            else:
                self.logger.exception(_('ERROR reading db %s'), object_file)
            self.stats['failure'] += 1
            self.logger.increment('failures')
            return
        # The db is considered deleted if the delete_timestamp value is
        # greater than the put_timestamp, and there are no objects.
        delete_timestamp = Timestamp(info.get('delete_timestamp') or 0)
        put_timestamp = Timestamp(info.get('put_timestamp') or 0)
        if delete_timestamp < (now - self.reclaim_age) and \
                delete_timestamp > put_timestamp and \
                info['count'] in (None, '', 0, '0'):
            if self.report_up_to_date(info):
                self.delete_db(broker)
            self.logger.timing_since('timing', start_time)
            return
        responses = []
        nodes = self.ring.get_part_nodes(int(partition))
        local_dev = None
        for node in nodes:
            if node['id'] == node_id:
                local_dev = node
                break
        if shouldbehere:
            shouldbehere = bool([n for n in nodes if n['id'] == node_id])
        # See Footnote [1] for an explanation of the repl_nodes assignment.
        i = 0
        while i < len(nodes) and nodes[i]['id'] != node_id:
            i += 1
        repl_nodes = nodes[i + 1:] + nodes[:i]
        more_nodes = self.ring.get_more_nodes(int(partition))
        if not local_dev:
            # Check further if local device is a handoff node
            for node in more_nodes:
                if node['id'] == node_id:
                    local_dev = node
                    break
        for node in repl_nodes:
            different_region = False
            if local_dev and local_dev['region'] != node['region']:
                # This additional information will help later if we
                # want to handle syncing to a node in different
                # region with some optimizations.
                different_region = True
            success = False
            try:
                success = self._repl_to_node(node, broker, partition, info,
                                             different_region)
            except DriveNotMounted:
                # builtin next() works on both Py2 and Py3 iterators;
                # the previous more_nodes.next() call was Python-2-only.
                repl_nodes.append(next(more_nodes))
                self.logger.error(_('ERROR Remote drive not mounted %s'), node)
            except (Exception, Timeout):
                self.logger.exception(_('ERROR syncing %(file)s with node'
                                        ' %(node)s'),
                                      {'file': object_file, 'node': node})
            self.stats['success' if success else 'failure'] += 1
            self.logger.increment('successes' if success else 'failures')
            responses.append(success)
        try:
            self._post_replicate_hook(broker, info, responses)
        except (Exception, Timeout):
            self.logger.exception('UNHANDLED EXCEPTION: in post replicate '
                                  'hook for %s', broker.db_file)
        if not shouldbehere and all(responses):
            # If the db shouldn't be on this node and has been successfully
            # synced to all of its peers, it can be removed.
            self.delete_db(broker)
        self.logger.timing_since('timing', start_time)

    def delete_db(self, broker):
        """
        Remove a database that no longer belongs on this node, cleaning up
        the (possibly now empty) suffix directory and updating stats.
        """
        object_file = broker.db_file
        hash_dir = os.path.dirname(object_file)
        suf_dir = os.path.dirname(hash_dir)
        with lock_parent_directory(object_file):
            shutil.rmtree(hash_dir, True)
        try:
            os.rmdir(suf_dir)
        except OSError as err:
            if err.errno not in (errno.ENOENT, errno.ENOTEMPTY):
                self.logger.exception(
                    _('ERROR while trying to clean up %s') % suf_dir)
        self.stats['remove'] += 1
        device_name = self.extract_device(object_file)
        self.logger.increment('removes.' + device_name)

    def extract_device(self, object_file):
        """
        Extract the device name from an object path. Returns "UNKNOWN" if the
        path could not be extracted successfully for some reason.

        :param object_file: the path to a database file.
        """
        match = self.extract_device_re.match(object_file)
        if match:
            return match.groups()[0]
        return "UNKNOWN"

    def report_up_to_date(self, full_info):
        """Hook for subclasses; True means the db's stats are fully
        reported and the db may be reclaimed."""
        return True

    def run_once(self, *args, **kwargs):
        """Run a replication pass once."""
        self._zero_stats()
        dirs = []
        ips = whataremyips()
        if not ips:
            self.logger.error(_('ERROR Failed to get my own IPs?'))
            return
        self._local_device_ids = set()
        for node in self.ring.devs:
            if node and is_local_device(ips, self.port,
                                        node['replication_ip'],
                                        node['replication_port']):
                if self.mount_check and not ismount(
                        os.path.join(self.root, node['device'])):
                    self.logger.warn(
                        _('Skipping %(device)s as it is not mounted') % node)
                    continue
                unlink_older_than(
                    os.path.join(self.root, node['device'], 'tmp'),
                    time.time() - self.reclaim_age)
                datadir = os.path.join(self.root, node['device'], self.datadir)
                if os.path.isdir(datadir):
                    self._local_device_ids.add(node['id'])
                    dirs.append((datadir, node['id']))
        self.logger.info(_('Beginning replication run'))
        for part, object_file, node_id in roundrobin_datadirs(dirs):
            self.cpool.spawn_n(
                self._replicate_object, part, object_file, node_id)
        self.cpool.waitall()
        self.logger.info(_('Replication run OVER'))
        self._report_stats()

    def run_forever(self, *args, **kwargs):
        """
        Replicate dbs under the given root in an infinite loop.
        """
        # Random initial delay so all replicators don't start in lockstep.
        sleep(random.random() * self.interval)
        while True:
            begin = time.time()
            try:
                self.run_once()
            except (Exception, Timeout):
                self.logger.exception(_('ERROR trying to replicate'))
            elapsed = time.time() - begin
            if elapsed < self.interval:
                sleep(self.interval - elapsed)
class ReplicatorRpc(object):
    """Handle Replication RPC calls. TODO(redbo): document please :)"""
    def __init__(self, root, datadir, broker_class, mount_check=True,
                 logger=None):
        # root: devices directory; datadir: server-type data dir name;
        # broker_class: DB broker class used to open database files.
        self.root = root
        self.datadir = datadir
        self.broker_class = broker_class
        self.mount_check = mount_check
        self.logger = logger or get_logger({}, log_route='replicator-rpc')
    def dispatch(self, replicate_args, args):
        """
        Route a REPLICATE request to the matching handler method.

        :param replicate_args: (drive, partition, hash) from the request path
        :param args: list whose first element is the operation name;
                     remaining elements are the operation's arguments
        :returns: swob response object
        """
        if not hasattr(args, 'pop'):
            return HTTPBadRequest(body='Invalid object type')
        op = args.pop(0)
        drive, partition, hsh = replicate_args
        if self.mount_check and not ismount(os.path.join(self.root, drive)):
            return Response(status='507 %s is not mounted' % drive)
        db_file = os.path.join(self.root, drive,
                               storage_directory(self.datadir, partition, hsh),
                               hsh + '.db')
        if op == 'rsync_then_merge':
            return self.rsync_then_merge(drive, db_file, args)
        if op == 'complete_rsync':
            return self.complete_rsync(drive, db_file, args)
        else:
            # someone might be about to rsync a db to us,
            # make sure there's a tmp dir to receive it.
            mkdirs(os.path.join(self.root, drive, 'tmp'))
            if not os.path.exists(db_file):
                return HTTPNotFound()
            # All other ops (e.g. sync, merge_items, merge_syncs) are
            # dispatched by name to methods taking (broker, args).
            return getattr(self, op)(self.broker_class(db_file), args)
    @contextmanager
    def debug_timing(self, name):
        """Log the wrapped section's duration when it exceeds
        DEBUG_TIMINGS_THRESHOLD seconds."""
        timemark = time.time()
        yield
        timespan = time.time() - timemark
        if timespan > DEBUG_TIMINGS_THRESHOLD:
            self.logger.debug(
                'replicator-rpc-sync time for %s: %.02fs' % (
                    name, timespan))
    def _parse_sync_args(self, args):
        """
        Convert remote sync args to remote_info dictionary.
        """
        (remote_sync, hash_, id_, created_at, put_timestamp,
         delete_timestamp, metadata) = args[:7]
        remote_metadata = {}
        if metadata:
            try:
                remote_metadata = json.loads(metadata)
            except ValueError:
                self.logger.error("Unable to decode remote metadata %r",
                                  metadata)
        remote_info = {
            'point': remote_sync,
            'hash': hash_,
            'id': id_,
            'created_at': created_at,
            'put_timestamp': put_timestamp,
            'delete_timestamp': delete_timestamp,
            'metadata': remote_metadata,
        }
        return remote_info
    def sync(self, broker, args):
        """Handle an incoming REPLICATE 'sync' operation."""
        remote_info = self._parse_sync_args(args)
        return self._handle_sync_request(broker, remote_info)
    def _get_synced_replication_info(self, broker, remote_info):
        """
        Apply any changes to the broker based on remote_info and return the
        current replication info.

        :param broker: the database broker
        :param remote_info: the remote replication info

        :returns: local broker replication info
        """
        return broker.get_replication_info()
    def _handle_sync_request(self, broker, remote_info):
        """
        Update metadata, timestamps, sync points.
        """
        with self.debug_timing('info'):
            try:
                info = self._get_synced_replication_info(broker, remote_info)
            except (Exception, Timeout) as e:
                if 'no such table' in str(e):
                    self.logger.error(_("Quarantining DB %s"), broker)
                    quarantine_db(broker.db_file, broker.db_type)
                    return HTTPNotFound()
                raise
        if remote_info['metadata']:
            with self.debug_timing('update_metadata'):
                broker.update_metadata(remote_info['metadata'])
        sync_timestamps = ('created_at', 'put_timestamp', 'delete_timestamp')
        if any(info[ts] != remote_info[ts] for ts in sync_timestamps):
            with self.debug_timing('merge_timestamps'):
                broker.merge_timestamps(*(remote_info[ts] for ts in
                                          sync_timestamps))
        with self.debug_timing('get_sync'):
            info['point'] = broker.get_sync(remote_info['id'])
        if remote_info['hash'] == info['hash'] and \
                info['point'] < remote_info['point']:
            with self.debug_timing('merge_syncs'):
                # Record the remote's sync point locally under its id.
                translate = {
                    'remote_id': 'id',
                    'sync_point': 'point',
                }
                data = dict((k, remote_info[v]) for k, v in translate.items())
                broker.merge_syncs([data])
                info['point'] = remote_info['point']
        return Response(json.dumps(info))
    def merge_syncs(self, broker, args):
        """Handle an incoming REPLICATE 'merge_syncs' operation."""
        broker.merge_syncs(args[0])
        return HTTPAccepted()
    def merge_items(self, broker, args):
        """Handle an incoming REPLICATE 'merge_items' operation."""
        broker.merge_items(args[0], args[1])
        return HTTPAccepted()
    def complete_rsync(self, drive, db_file, args):
        """Finish a whole-db rsync: move the rsynced tmp file into place."""
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if os.path.exists(db_file):
            return HTTPNotFound()
        if not os.path.exists(old_filename):
            return HTTPNotFound()
        broker = self.broker_class(old_filename)
        broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
    def rsync_then_merge(self, drive, db_file, args):
        """Merge the existing db's rows into the rsynced tmp db, then move
        the tmp db into place."""
        old_filename = os.path.join(self.root, drive, 'tmp', args[0])
        if not os.path.exists(db_file) or not os.path.exists(old_filename):
            return HTTPNotFound()
        new_broker = self.broker_class(old_filename)
        existing_broker = self.broker_class(db_file)
        point = -1
        objects = existing_broker.get_items_since(point, 1000)
        while len(objects):
            new_broker.merge_items(objects)
            point = objects[-1]['ROWID']
            objects = existing_broker.get_items_since(point, 1000)
            # Yield to other greenthreads between batches.
            sleep()
        new_broker.newid(args[0])
        renamer(old_filename, db_file)
        return HTTPNoContent()
# Footnote [1]:
# This orders the nodes so that, given nodes a b c, a will contact b then c,
# b will contact c then a, and c will contact a then b -- in other words, each
# node will always contact the next node in the list first.
# This helps in the case where databases are all way out of sync, so each
# node is likely to be sending to a different node than it's receiving from,
# rather than two nodes talking to each other, starving out the third.
# If the third didn't even have a copy and the first two nodes were way out
# of sync, such starvation would mean the third node wouldn't get any copy
# until the first two nodes finally got in sync, which could take a while.
# This new ordering ensures such starvation doesn't occur, making the data
# more durable.
|
|
#!/usr/bin/env python
############################################################################
#
# Copyright (c) 2013, 2014 PX4 Development Team. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# 3. Neither the name PX4 nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
############################################################################
#
# Generate multirotor mixer scale tables compatible with the ArduCopter layout
#
# for python2.7 compatibility
from __future__ import print_function
import math
# Emit the generated-file banner and the opening include guard.
for _header_line in (
    "/*",
    "* This file is automatically generated by multi_tables - do not edit.",
    "*/",
    "",
    "#ifndef _MIXER_MULTI_TABLES",
    "#define _MIXER_MULTI_TABLES",
    "",
):
    print(_header_line)
def rcos(angleInRadians):
    # Cosine of an angle given in DEGREES.
    # NOTE(review): despite the parameter name, the argument is degrees —
    # math.radians() converts degrees to radians before cos() is applied.
    # The name is kept to avoid breaking keyword callers.
    return math.cos(math.radians(angleInRadians))
# Rotor spin-direction constants used as yaw scales: CCW is positive.
CCW = 1.0
CW = -CCW

# Geometry tables: one row per rotor.  Each row is either
#   [angle_degrees, yaw_scale]                  (thrust scale defaults to 1.0)
# or
#   [angle_degrees, yaw_scale, thrust_scale].
# The angle is fed to rcos() to derive roll/pitch scales; yaw_scale is
# usually CW/CCW but may be a fractional value (e.g. quad_v, twin_engine).
quad_x = [
    [  45, CCW],
    [-135, CCW],
    [ -45, CW],
    [ 135, CW],
]
quad_h = [
    [  45, CW],
    [-135, CW],
    [ -45, CCW],
    [ 135, CCW],
]
quad_plus = [
    [  90, CCW],
    [ -90, CCW],
    [   0, CW],
    [ 180, CW],
]
# Three-element rows: rear rotors carry a reduced thrust scale (0.964).
quad_deadcat = [
    [  63, CCW, 1.0],
    [-135, CCW, 0.964],
    [ -63, CW, 1.0],
    [ 135, CW, 0.964],
]
# V-tail quad: yaw scales are fractional, not pure CW/CCW.
quad_v = [
    [  18.8, 0.4242],
    [ -18.8, 1.0],
    [ -18.8, -0.4242],
    [  18.8, -1.0],
]
quad_wide = [
    [  68, CCW],
    [-129, CCW],
    [ -68, CW],
    [ 129, CW],
]
hex_x = [
    [  90, CW],
    [ -90, CCW],
    [ -30, CW],
    [ 150, CCW],
    [  30, CCW],
    [-150, CW],
]
hex_plus = [
    [   0, CW],
    [ 180, CCW],
    [-120, CW],
    [  60, CCW],
    [ -60, CCW],
    [ 120, CW],
]
hex_cox = [
    [  60, CW],
    [  60, CCW],
    [ 180, CW],
    [ 180, CCW],
    [ -60, CW],
    [ -60, CCW],
]
hex_t = [
    [  43.21, CCW],
    [  43.21, CW],
    [ 180, CW],
    [ 180, CCW],
    [ -43.21, CW],
    [ -43.21, CCW],
]
octa_x = [
    [  22.5, CW],
    [-157.5, CW],
    [  67.5, CCW],
    [ 157.5, CCW],
    [ -22.5, CCW],
    [-112.5, CCW],
    [ -67.5, CW],
    [ 112.5, CW],
]
octa_plus = [
    [   0, CW],
    [ 180, CW],
    [  45, CCW],
    [ 135, CCW],
    [ -45, CCW],
    [-135, CCW],
    [ -90, CW],
    [  90, CW],
]
octa_cox = [
    [  45, CCW],
    [ -45, CW],
    [-135, CCW],
    [ 135, CW],
    [ -45, CCW],
    [  45, CW],
    [ 135, CCW],
    [-135, CW],
]
octa_cox_wide = [
    [  68, CCW],
    [ -68, CW],
    [-129, CCW],
    [ 129, CW],
    [ -68, CCW],
    [  68, CW],
    [ 129, CCW],
    [-129, CW],
]
twin_engine = [
    [  90, 0.0],
    [ -90, 0.0],
]
tri_y = [
    [  60, 0.0],
    [ -60, 0.0],
    [ 180, 0.0],
]
# Master list: order defines the MultirotorGeometry enum values and the
# layout of the generated index/count arrays.
tables = [quad_x, quad_h, quad_plus, quad_v, quad_wide, quad_deadcat, hex_x, hex_plus, hex_cox, hex_t, octa_x, octa_plus, octa_cox, octa_cox_wide, twin_engine, tri_y]
def variableName(variable):
    """Return the module-global name bound to *variable* (identity match).

    Returns None when no global is bound to the exact object.
    """
    for name, bound_value in list(globals().items()):
        if bound_value is variable:
            return name
def unpackScales(scalesList):
    """Normalize a geometry-table row to [angle, yawScale, thrustScale].

    Rows may omit the thrust scale; it defaults to 1.0.  Unlike the original
    implementation, this does NOT mutate the caller's list (the old ``+=``
    appended the default thrust scale to the shared table rows in place).
    """
    if len(scalesList) == 2:
        return scalesList + [1.0]  # new list: add default thrust scale
    return scalesList
def printEnum():
    """Emit the MultirotorGeometry enum, one entry per geometry table."""
    print("enum class MultirotorGeometry : MultirotorGeometryUnderlyingType {")
    for geometry in tables:
        print("\t{},".format(variableName(geometry).upper()))
    print("\n\tMAX_GEOMETRY")
    print("}; // enum class MultirotorGeometry\n")
def printScaleTables():
    """Emit one rotor-config array per geometry.

    Each rotor row becomes { roll, pitch, yaw, thrust } scales, with
    roll/pitch derived from the rotor angle via rcos().
    """
    for geometry in tables:
        print("const MultirotorMixer::Rotor _config_{}[] = {{".format(variableName(geometry)))
        for entry in geometry:
            angle, yawScale, thrustScale = unpackScales(entry)
            print("\t{{ {:9f}, {:9f}, {:9f}, {:9f} }},".format(
                rcos(angle + 90), rcos(angle), yawScale, thrustScale))
        print("};\n")
def printScaleTablesIndex():
    """Emit the index array mapping each enum value to its config table."""
    print("const MultirotorMixer::Rotor *_config_index[] = {")
    for geometry in tables:
        print("\t&_config_{}[0],".format(variableName(geometry)))
    print("};\n")
def printScaleTablesCounts():
    """Emit the per-geometry rotor-count array."""
    print("const unsigned _config_rotor_count[] = {")
    for geometry in tables:
        print("\t{}, /* {} */".format(len(geometry), variableName(geometry)))
    print("};\n")
# Drive the full header generation: the enum, then the per-geometry tables
# wrapped in an anonymous namespace, and finally the closing include guard.
printEnum()
print("namespace {")
printScaleTables()
printScaleTablesIndex()
printScaleTablesCounts()
print("} // anonymous namespace\n")
print("#endif /* _MIXER_MULTI_TABLES */")
print("")
|
|
# -*- coding: utf-8 -*-
"""VGG16 model for Keras.
# Reference
- [Very Deep Convolutional Networks for Large-Scale Image Recognition](https://arxiv.org/abs/1409.1556)
"""
from __future__ import print_function
from __future__ import absolute_import
import warnings
import sys
sys.path.append('/home/cbritt2/keras/build/lib')
from keras.models import Model
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Input
from keras.layers import Conv2D
from keras.layers import MaxPooling2D
from keras.layers import GlobalAveragePooling2D
from keras.layers import GlobalMaxPooling2D
from keras.engine.topology import get_source_inputs
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras import backend as K
from keras.applications.imagenet_utils import decode_predictions
from keras.applications.imagenet_utils import preprocess_input
from keras.applications.imagenet_utils import _obtain_input_shape
# Pretrained ImageNet weight files (TF dim ordering), with and without the
# three fully-connected top layers.
WEIGHTS_PATH = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels.h5'
WEIGHTS_PATH_NO_TOP = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.1/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5'
def VGG16(include_top=True, weights='imagenet',
          input_tensor=None, input_shape=None,
          pooling=None,
          classes=1000):
    """Instantiates the VGG16 architecture.

    Optionally loads weights pre-trained on ImageNet. Note that when using
    TensorFlow, for best performance you should set
    `image_data_format='channels_last'` in your Keras config at
    ~/.keras/keras.json.

    The model and the weights are compatible with both TensorFlow and
    Theano. The data format convention used by the model is the one
    specified in your Keras config file.

    # Arguments
        include_top: whether to include the 3 fully-connected
            layers at the top of the network.
        weights: one of `None` (random initialization)
            or 'imagenet' (pre-training on ImageNet).
        input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(224, 224, 3)` (with `channels_last` data format)
            or `(3, 224, 224)` (with `channels_first` data format).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 48.
            E.g. `(200, 200, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the last convolutional layer.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional layer, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True, and
            if no `weights` argument is specified.

    # Returns
        A Keras model instance.

    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
    """
    # Cap per-process GPU memory so this model can share the GPU.
    # BUG FIX: the original referenced `tf` without ever importing it,
    # raising NameError on every call (and would also have run on non-TF
    # backends).  Import locally and only configure a session when the
    # TensorFlow backend is actually active.  Leftover debug prints removed.
    if K.backend() == 'tensorflow':
        import tensorflow as tf
        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.9
        K.set_session(tf.Session(config=config))

    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    # Determine proper input shape
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    # Block 1
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)
    x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    # Block 3
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)
    x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)

    # Block 4
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)

    # Block 5
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(x)

    if include_top:
        # Classification block
        x = Flatten(name='flatten')(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dense(4096, activation='relu', name='fc2')(x)
        x = Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    # Create model.
    model = Model(inputs, x, name='vgg16')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels.h5',
                                    WEIGHTS_PATH,
                                    cache_subdir='models',
                                    file_hash='64373286793e3c8b2b4e3219cbf3544b')
        else:
            weights_path = get_file('vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5',
                                    WEIGHTS_PATH_NO_TOP,
                                    cache_subdir='models',
                                    file_hash='6d6bbae143d832006294945121d1f1fc')
        model.load_weights(weights_path)
        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if include_top:
                # Dense weights were saved for channels_last; convert them.
                maxpool = model.get_layer(name='block5_pool')
                shape = maxpool.output_shape[1:]
                dense = model.get_layer(name='fc1')
                layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')

            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
|
|
import jsonschema
from django.utils import timezone
from rest_framework import serializers as ser
from rest_framework import exceptions
from addons.twofactor.models import UserSettings as TwoFactorUserSettings
from api.base.exceptions import InvalidModelValueError, Conflict
from api.base.serializers import (
BaseAPISerializer, JSONAPISerializer, JSONAPIRelationshipSerializer,
VersionedDateTimeField, HideIfDisabled, IDField,
Link, LinksField, TypeField, RelationshipField, JSONAPIListField,
WaterbutlerLink, ShowIfCurrentUser,
)
from api.base.utils import absolute_reverse, get_user_auth, waterbutler_api_url_for, is_deprecated
from api.files.serializers import QuickFilesSerializer
from osf.exceptions import ValidationValueError, ValidationError
from osf.models import OSFUser, QuickFilesNode
from website.settings import MAILCHIMP_GENERAL_LIST, OSF_HELP_LIST, CONFIRM_REGISTRATIONS_BY_EMAIL
from osf.models.provider import AbstractProviderGroupObjectPermission
from website.profile.views import update_osf_help_mails_subscription, update_mailchimp_subscription
from api.nodes.serializers import NodeSerializer
from api.users.schemas.utils import validate_user_json, from_json
from framework.auth.views import send_confirm_email
class QuickFilesRelationshipField(RelationshipField):
    """Relationship field that adds waterbutler upload/download links to the
    quickfiles relationship representation."""

    def to_representation(self, value):
        links = super(QuickFilesRelationshipField, self).to_representation(value)
        quickfiles_guid = value.nodes_created.filter(type=QuickFilesNode._typedmodels_type).values_list('guids___id', flat=True).get()
        base_url = waterbutler_api_url_for(quickfiles_guid, 'osfstorage')
        # Upload goes straight to osfstorage; download is the zipped folder.
        links['links']['upload'] = {'href': base_url, 'meta': {}}
        links['links']['download'] = {'href': '{}?zip='.format(base_url), 'meta': {}}
        return links
class SocialField(ser.DictField):
    """Dict field whose GET representation downgrades selected social entries
    from list form back to plain strings for API versions older than
    *min_version*."""

    def __init__(self, min_version, **kwargs):
        super(SocialField, self).__init__(**kwargs)
        self.min_version = min_version
        self.help_text = 'This field will change data formats after version {}'.format(self.min_version)

    def to_representation(self, value):
        request = self.context.get('request')
        wants_old_format = bool(request) and is_deprecated(request.version, self.min_version) and request.method == 'GET'
        if wants_old_format:
            # Work on a copy so the underlying value is never mutated.
            legacy = value.copy()
            for field_name in ('twitter', 'github', 'linkedIn'):
                if legacy.get(field_name):
                    legacy[field_name] = value[field_name][0]
                elif legacy.get(field_name) == []:
                    legacy[field_name] = ''
            value = legacy
        return super(SocialField, self).to_representation(value)
class UserSerializer(JSONAPISerializer):
    """JSON-API serializer for OSF user profiles.

    Exposes naming/profile/social fields plus relationships to the user's
    nodes, quickfiles, registrations, institutions, preprints and emails.
    Several fields are hidden for disabled accounts (HideIfDisabled) or
    visible only to the user themselves (ShowIfCurrentUser).
    """
    # Fields clients may filter on via query parameters.
    filterable_fields = frozenset([
        'full_name',
        'given_name',
        'middle_names',
        'family_name',
        'id',
    ])
    # SerializerMethodFields that also accept writes (handled in update()).
    writeable_method_fields = frozenset([
        'accepted_terms_of_service',
    ])
    non_anonymized_fields = ['type']
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    full_name = ser.CharField(source='fullname', required=True, label='Full name', help_text='Display name used in the general user interface', max_length=186)
    given_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    middle_names = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    family_name = ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations')
    suffix = HideIfDisabled(ser.CharField(required=False, allow_blank=True, help_text='For bibliographic citations'))
    date_registered = HideIfDisabled(VersionedDateTimeField(read_only=True))
    active = HideIfDisabled(ser.BooleanField(read_only=True, source='is_active'))
    # NOTE(review): the help_text below is missing a closing quote around
    # 'Etc/UTC' in the original; left byte-identical (runtime string).
    timezone = HideIfDisabled(ser.CharField(required=False, help_text="User's timezone, e.g. 'Etc/UTC"))
    locale = HideIfDisabled(ser.CharField(required=False, help_text="User's locale, e.g.  'en_US'"))
    social = SocialField(required=False, min_version='2.10')
    employment = JSONAPIListField(required=False, source='jobs')
    education = JSONAPIListField(required=False, source='schools')
    can_view_reviews = ShowIfCurrentUser(ser.SerializerMethodField(help_text='Whether the current user has the `view_submissions` permission to ANY reviews provider.'))
    accepted_terms_of_service = ShowIfCurrentUser(ser.SerializerMethodField())
    links = HideIfDisabled(LinksField(
        {
            'html': 'absolute_url',
            'profile_image': 'profile_image_url',
        },
    ))
    nodes = HideIfDisabled(RelationshipField(
        related_view='users:user-nodes',
        related_view_kwargs={'user_id': '<_id>'},
        related_meta={'projects_in_common': 'get_projects_in_common'},
    ))
    quickfiles = HideIfDisabled(QuickFilesRelationshipField(
        related_view='users:user-quickfiles',
        related_view_kwargs={'user_id': '<_id>'},
    ))
    registrations = HideIfDisabled(RelationshipField(
        related_view='users:user-registrations',
        related_view_kwargs={'user_id': '<_id>'},
    ))
    institutions = HideIfDisabled(RelationshipField(
        related_view='users:user-institutions',
        related_view_kwargs={'user_id': '<_id>'},
        self_view='users:user-institutions-relationship',
        self_view_kwargs={'user_id': '<_id>'},
    ))
    preprints = HideIfDisabled(RelationshipField(
        related_view='users:user-preprints',
        related_view_kwargs={'user_id': '<_id>'},
    ))
    emails = ShowIfCurrentUser(RelationshipField(
        related_view='users:user-emails',
        related_view_kwargs={'user_id': '<_id>'},
    ))
    default_region = ShowIfCurrentUser(RelationshipField(
        related_view='regions:region-detail',
        related_view_kwargs={'region_id': 'get_default_region_id'},
        read_only=True,
    ))

    class Meta:
        type_ = 'users'

    def get_projects_in_common(self, obj):
        """Count of projects shared between *obj* and the requesting user."""
        user = get_user_auth(self.context['request']).user
        if obj == user:
            return user.contributor_to.count()
        return obj.n_projects_in_common(user)

    def absolute_url(self, obj):
        # Backs the 'html' entry of the links field above.
        if obj is not None:
            return obj.absolute_url
        return None

    def get_absolute_url(self, obj):
        """Canonical API detail URL for this user."""
        return absolute_reverse(
            'users:user-detail', kwargs={
                'user_id': obj._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def get_can_view_reviews(self, obj):
        """Whether *obj* holds 'view_submissions' on any reviews provider."""
        group_qs = AbstractProviderGroupObjectPermission.objects.filter(group__user=obj, permission__codename='view_submissions')
        # NOTE(review): the second operand is a queryset, not a boolean, so a
        # non-empty queryset (not True) can be returned here; `.exists()` was
        # probably intended on it as well. Left unchanged.
        return group_qs.exists() or obj.abstractprovideruserobjectpermission_set.filter(permission__codename='view_submissions')

    def get_default_region_id(self, obj):
        """Return the user's default storage-region id."""
        try:
            # use the annotated value if possible
            region_id = obj.default_region
        except AttributeError:
            # use computed property if region annotation does not exist
            region_id = obj.osfstorage_region._id
        return region_id

    def get_accepted_terms_of_service(self, obj):
        # Rendered as a boolean even if the model stores a timestamp.
        return bool(obj.accepted_terms_of_service)

    def profile_image_url(self, user):
        """Profile-image URL, sized via the ?profile_image_size query param."""
        size = self.context['request'].query_params.get('profile_image_size')
        return user.profile_image_url(size=size)

    def validate_employment(self, value):
        # Schema-validate the employment history entries.
        validate_user_json(value, 'employment-schema.json')
        return value

    def validate_education(self, value):
        # Schema-validate the education history entries.
        validate_user_json(value, 'education-schema.json')
        return value

    def validate_social(self, value):
        # Schema-validate the social-links dict; surface schema errors as
        # InvalidModelValueError so they render as JSON-API errors.
        schema = from_json('social-schema.json')
        try:
            jsonschema.validate(value, schema)
        except jsonschema.ValidationError as e:
            raise InvalidModelValueError(e)
        return value

    def update(self, instance, validated_data):
        """Apply validated profile changes to *instance* and save."""
        assert isinstance(instance, OSFUser), 'instance must be a User'
        for attr, value in validated_data.items():
            if 'social' == attr:
                # Merge social entries key-by-key rather than replacing the dict.
                for key, val in value.items():
                    instance.social[key] = val
            elif 'accepted_terms_of_service' == attr:
                # Stamp acceptance time only on the first acceptance.
                if value and not instance.accepted_terms_of_service:
                    instance.accepted_terms_of_service = timezone.now()
            else:
                setattr(instance, attr, value)
        try:
            instance.save()
        except ValidationValueError as e:
            raise InvalidModelValueError(detail=e.message)
        except ValidationError as e:
            raise InvalidModelValueError(e)
        return instance
class UserAddonSettingsSerializer(JSONAPISerializer):
    """Serializer for a user's settings for a single addon provider.

    (The previous docstring -- "Overrides UserSerializer to make id
    required" -- was copied from a different serializer and did not
    describe this class.)
    """
    # The addon's short name doubles as the resource id.
    id = ser.CharField(source='config.short_name', read_only=True)
    user_has_auth = ser.BooleanField(source='has_auth', read_only=True)
    links = LinksField({
        'self': 'get_absolute_url',
        'accounts': 'account_links',
    })

    class Meta:
        type_ = 'user_addons'

    def get_absolute_url(self, obj):
        """API detail URL for this user/addon settings object."""
        return absolute_reverse(
            'users:user-addon-detail',
            kwargs={
                'provider': obj.config.short_name,
                'user_id': self.context['request'].parser_context['kwargs']['user_id'],
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def account_links(self, obj):
        """Map each linked external-account id to its detail URL and the
        nodes it is attached to."""
        # TODO: [OSF-4933] remove this after refactoring Figshare
        if hasattr(obj, 'external_accounts'):
            return {
                account._id: {
                    'account': absolute_reverse(
                        'users:user-external_account-detail', kwargs={
                            'user_id': obj.owner._id,
                            'provider': obj.config.short_name,
                            'account_id': account._id,
                            'version': self.context['request'].parser_context['kwargs']['version'],
                        },
                    ),
                    'nodes_connected': [n.absolute_api_v2_url for n in obj.get_attached_nodes(account)],
                }
                for account in obj.external_accounts.all()
            }
        return {}
class UserDetailSerializer(UserSerializer):
    """
    Overrides UserSerializer to make id required.
    """
    # A required id forces clients to address an existing user on detail routes.
    id = IDField(source='_id', required=True)
class UserQuickFilesSerializer(QuickFilesSerializer):
    """Quickfiles serializer variant exposing waterbutler file-action links."""
    links = LinksField({
        'info': Link('files:file-detail', kwargs={'file_id': '<_id>'}),
        'upload': WaterbutlerLink(),
        'delete': WaterbutlerLink(),
        'move': WaterbutlerLink(),
        # Download only applies to files, not folders.
        'download': WaterbutlerLink(must_be_file=True),
    })
class ReadEmailUserDetailSerializer(UserDetailSerializer):
    """User detail serializer that additionally exposes the account's
    username under the 'email' key."""
    email = ser.CharField(source='username', read_only=True)
class RelatedInstitution(JSONAPIRelationshipSerializer):
    """Relationship payload entry for a single institution."""
    id = ser.CharField(required=False, allow_null=True, source='_id')

    class Meta:
        type_ = 'institutions'

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url
class UserInstitutionsRelationshipSerializer(BaseAPISerializer):
    """Serializer for the user <-> institutions relationship endpoint."""
    data = ser.ListField(child=RelatedInstitution())
    links = LinksField({
        'self': 'get_self_url',
        'html': 'get_related_url',
    })

    def get_self_url(self, obj):
        """URL of the relationship resource itself."""
        return absolute_reverse(
            'users:user-institutions-relationship', kwargs={
                'user_id': obj['self']._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def get_related_url(self, obj):
        """URL of the related institutions collection."""
        return absolute_reverse(
            'users:user-institutions', kwargs={
                'user_id': obj['self']._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    def get_absolute_url(self, obj):
        return obj.absolute_api_v2_url

    class Meta:
        type_ = 'institutions'
class UserIdentitiesSerializer(JSONAPISerializer):
    """Serializer for a user's linked external identities."""
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    external_id = ser.CharField(read_only=True)
    status = ser.CharField(read_only=True)
    links = LinksField({
        'self': 'get_absolute_url',
    })

    def get_absolute_url(self, obj):
        """API detail URL for this external identity."""
        return absolute_reverse(
            'users:user-identities-detail',
            kwargs={
                'user_id': self.context['request'].parser_context['kwargs']['user_id'],
                'version': self.context['request'].parser_context['kwargs']['version'],
                'identity_id': obj['_id'],
            },
        )

    class Meta:
        type_ = 'external-identities'
class UserAccountExportSerializer(BaseAPISerializer):
    """Type-only payload for the account-export request form."""
    type = TypeField()

    class Meta:
        type_ = 'user-account-export-form'
class UserAccountDeactivateSerializer(BaseAPISerializer):
    """Type-only payload for the account-deactivation request form."""
    type = TypeField()

    class Meta:
        type_ = 'user-account-deactivate-form'
class UserChangePasswordSerializer(BaseAPISerializer):
    """Payload for password changes; both passwords are write-only."""
    type = TypeField()
    existing_password = ser.CharField(write_only=True, required=True)
    new_password = ser.CharField(write_only=True, required=True)

    class Meta:
        type_ = 'user_password'
class UserSettingsSerializer(JSONAPISerializer):
    """Read-side serializer for a user's account-settings flags."""
    id = IDField(source='_id', read_only=True)
    type = TypeField()
    two_factor_enabled = ser.SerializerMethodField()
    subscribe_osf_general_email = ser.SerializerMethodField()
    subscribe_osf_help_email = ser.SerializerMethodField()

    def get_two_factor_enabled(self, obj):
        """True when a non-deleted two-factor addon record exists."""
        try:
            two_factor = TwoFactorUserSettings.objects.get(owner_id=obj.id)
            return not two_factor.deleted
        except TwoFactorUserSettings.DoesNotExist:
            return False

    def get_subscribe_osf_general_email(self, obj):
        # Defaults to False when the list key is absent.
        return obj.mailchimp_mailing_lists.get(MAILCHIMP_GENERAL_LIST, False)

    def get_subscribe_osf_help_email(self, obj):
        # Defaults to False when the list key is absent.
        return obj.osf_mailing_lists.get(OSF_HELP_LIST, False)

    links = LinksField({
        'self': 'get_absolute_url',
    })

    def get_absolute_url(self, obj):
        """API detail URL for this user's settings."""
        return absolute_reverse(
            'users:user_settings',
            kwargs={
                'user_id': obj._id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    class Meta:
        type_ = 'user_settings'
class UserSettingsUpdateSerializer(UserSettingsSerializer):
    """Write-side serializer for user-settings updates.

    Accepts two-factor and mailing-list toggles; responses are rendered
    via the read-only UserSettingsSerializer (see to_representation).
    """
    id = IDField(source='_id', required=True)
    two_factor_enabled = ser.BooleanField(write_only=True, required=False)
    two_factor_verification = ser.IntegerField(write_only=True, required=False)
    subscribe_osf_general_email = ser.BooleanField(read_only=False, required=False)
    subscribe_osf_help_email = ser.BooleanField(read_only=False, required=False)

    # Keys represent field names values represent the human readable names stored in DB.
    MAP_MAIL = {
        'subscribe_osf_help_email': OSF_HELP_LIST,
        'subscribe_osf_general_email': MAILCHIMP_GENERAL_LIST,
    }

    def update_email_preferences(self, instance, attr, value):
        """Route the mailing-list toggle to the appropriate backend and save."""
        if self.MAP_MAIL[attr] == OSF_HELP_LIST:
            update_osf_help_mails_subscription(user=instance, subscribe=value)
        else:
            update_mailchimp_subscription(instance, self.MAP_MAIL[attr], value)
        instance.save()

    def update_two_factor(self, instance, value, two_factor_addon):
        """Enable (add) or disable (delete) the two-factor addon.

        Returns the addon instance; note the return value is currently
        ignored by update() below.
        """
        if value:
            if not two_factor_addon:
                two_factor_addon = instance.get_or_add_addon('twofactor')
                two_factor_addon.save()
        else:
            auth = get_user_auth(self.context['request'])
            instance.delete_addon('twofactor', auth=auth)
        return two_factor_addon

    def verify_two_factor(self, instance, value, two_factor_addon):
        """Confirm two-factor setup using the user-supplied code *value*."""
        if not two_factor_addon:
            raise exceptions.ValidationError(detail='Two-factor authentication is not enabled.')
        if two_factor_addon.verify_code(value):
            two_factor_addon.is_confirmed = True
        else:
            raise exceptions.PermissionDenied(detail='The two-factor verification code you provided is invalid.')
        two_factor_addon.save()

    def to_representation(self, instance):
        """
        Overriding to_representation allows using different serializers for the request and response.
        """
        context = self.context
        return UserSettingsSerializer(instance=instance, context=context).data

    def update(self, instance, validated_data):
        """Dispatch each validated field to its dedicated handler."""
        for attr, value in validated_data.items():
            if 'two_factor_enabled' == attr:
                two_factor_addon = instance.get_addon('twofactor')
                self.update_two_factor(instance, value, two_factor_addon)
            elif 'two_factor_verification' == attr:
                two_factor_addon = instance.get_addon('twofactor')
                self.verify_two_factor(instance, value, two_factor_addon)
            elif attr in self.MAP_MAIL.keys():
                self.update_email_preferences(instance, attr, value)
        return instance
class UserEmail(object):
    """Plain value object describing one email address attached to a user."""

    def __init__(self, email_id, address, confirmed, primary):
        # Attribute layout mirrors the constructor-argument order.
        self.id, self.address = email_id, address
        self.confirmed, self.primary = confirmed, primary
class UserEmailsSerializer(JSONAPISerializer):
    """Serializer for the email addresses attached to a user account."""
    id = IDField(read_only=True)
    type = TypeField()
    email_address = ser.CharField(source='address')
    confirmed = ser.BooleanField(read_only=True)
    primary = ser.BooleanField(required=False)
    links = LinksField({
        'self': 'get_absolute_url',
    })

    def get_absolute_url(self, obj):
        """API detail URL for this email of the requesting user."""
        user = self.context['request'].user
        return absolute_reverse(
            'users:user-email-detail',
            kwargs={
                'user_id': user._id,
                'email_id': obj.id,
                'version': self.context['request'].parser_context['kwargs']['version'],
            },
        )

    class Meta:
        type_ = 'user_emails'

    def create(self, validated_data):
        """Add an unconfirmed email to the current user; 409 on duplicates."""
        user = self.context['request'].user
        address = validated_data['address']
        if address in user.unconfirmed_emails or address in user.emails.all().values_list('address', flat=True):
            raise Conflict('This user already has registered with the email address {}'.format(address))
        try:
            token = user.add_unconfirmed_email(address)
            user.save()
            if CONFIRM_REGISTRATIONS_BY_EMAIL:
                send_confirm_email(user, email=address)
        except ValidationError as e:
            raise exceptions.ValidationError(e.args[0])
        # The confirmation token doubles as the id of the unconfirmed email.
        return UserEmail(email_id=token, address=address, confirmed=False, primary=False)

    def update(self, instance, validated_data):
        """Promote a confirmed email to primary (it becomes the username)."""
        user = self.context['request'].user
        primary = validated_data.get('primary', None)
        if primary and instance.confirmed:
            user.username = instance.address
            user.save()
        elif primary and not instance.confirmed:
            raise exceptions.ValidationError('You cannot set an unconfirmed email address as your primary email address.')
        return instance
class UserNodeSerializer(NodeSerializer):
    """Node serializer that also allows filtering on the requesting user's
    permissions."""
    filterable_fields = NodeSerializer.filterable_fields | {'current_user_permissions'}
|
|
#!/home/mpvoss/PycharmProjects/PyDemo/env/bin/python
"""PILdriver, an image-processing calculator using PIL.
An instance of class PILDriver is essentially a software stack machine
(Polish-notation interpreter) for sequencing PIL image
transformations. The state of the instance is the interpreter stack.
The only method one will normally invoke after initialization is the
`execute' method. This takes an argument list of tokens, pushes them
onto the instance's stack, and then tries to clear the stack by
successive evaluation of PILdriver operators. Any part of the stack
not cleaned off persists and is part of the evaluation context for
the next call of the execute method.
PILDriver doesn't catch any exceptions, on the theory that these
are actually diagnostic information that should be interpreted by
the calling code.
When called as a script, the command-line arguments are passed to
a PILDriver instance. If there are no command-line arguments, the
module runs an interactive interpreter, each line of which is split into
space-separated tokens and passed to the execute method.
In the method descriptions below, a first line beginning with the string
`usage:' means this method can be invoked with the token that follows
it. Following <>-enclosed arguments describe how the method interprets
the entries on the stack. Each argument specification begins with a
type specification: either `int', `float', `string', or `image'.
All operations consume their arguments off the stack (use `dup' to
keep copies around). Use `verbose 1' to see the stack state displayed
before each operation.
Usage examples:
`show crop 0 0 200 300 open test.png' loads test.png, crops out a portion
of its upper-left-hand corner and displays the cropped portion.
`save rotated.png rotate 30 open test.tiff' loads test.tiff, rotates it
30 degrees, and saves the result as rotated.png (in PNG format).
"""
# by Eric S. Raymond <esr@thyrsus.com>
# $Id$
# TO DO:
# 1. Add PILFont capabilities, once that's documented.
# 2. Add PILDraw operations.
# 3. Add support for composing and decomposing multiple-image files.
#
from __future__ import print_function
from PIL import Image
class PILDriver(object):
verbose = 0
def do_verbose(self):
"""usage: verbose <int:num>
Set verbosity flag from top of stack.
"""
self.verbose = int(self.do_pop())
# The evaluation stack (internal only)
stack = [] # Stack of pending operations
def push(self, item):
"Push an argument onto the evaluation stack."
self.stack.insert(0, item)
def top(self):
"Return the top-of-stack element."
return self.stack[0]
# Stack manipulation (callable)
def do_clear(self):
"""usage: clear
Clear the stack.
"""
self.stack = []
def do_pop(self):
"""usage: pop
Discard the top element on the stack.
"""
return self.stack.pop(0)
def do_dup(self):
"""usage: dup
Duplicate the top-of-stack item.
"""
if hasattr(self, 'format'): # If it's an image, do a real copy
dup = self.stack[0].copy()
else:
dup = self.stack[0]
self.push(dup)
def do_swap(self):
"""usage: swap
Swap the top-of-stack item with the next one down.
"""
self.stack = [self.stack[1], self.stack[0]] + self.stack[2:]
# Image module functions (callable)
def do_new(self):
"""usage: new <int:xsize> <int:ysize> <int:color>:
Create and push a greyscale image of given size and color.
"""
xsize = int(self.do_pop())
ysize = int(self.do_pop())
color = int(self.do_pop())
self.push(Image.new("L", (xsize, ysize), color))
def do_open(self):
"""usage: open <string:filename>
Open the indicated image, read it, push the image on the stack.
"""
self.push(Image.open(self.do_pop()))
def do_blend(self):
"""usage: blend <image:pic1> <image:pic2> <float:alpha>
Replace two images and an alpha with the blended image.
"""
image1 = self.do_pop()
image2 = self.do_pop()
alpha = float(self.do_pop())
self.push(Image.blend(image1, image2, alpha))
def do_composite(self):
"""usage: composite <image:pic1> <image:pic2> <image:mask>
Replace two images and a mask with their composite.
"""
image1 = self.do_pop()
image2 = self.do_pop()
mask = self.do_pop()
self.push(Image.composite(image1, image2, mask))
def do_merge(self):
"""usage: merge <string:mode> <image:pic1>
[<image:pic2> [<image:pic3> [<image:pic4>]]]
Merge top-of stack images in a way described by the mode.
"""
mode = self.do_pop()
bandlist = []
for band in mode:
bandlist.append(self.do_pop())
self.push(Image.merge(mode, bandlist))
# Image class methods
def do_convert(self):
"""usage: convert <string:mode> <image:pic1>
Convert the top image to the given mode.
"""
mode = self.do_pop()
image = self.do_pop()
self.push(image.convert(mode))
def do_copy(self):
"""usage: copy <image:pic1>
Make and push a true copy of the top image.
"""
self.dup()
def do_crop(self):
"""usage: crop <int:left> <int:upper> <int:right> <int:lower>
<image:pic1>
Crop and push a rectangular region from the current image.
"""
left = int(self.do_pop())
upper = int(self.do_pop())
right = int(self.do_pop())
lower = int(self.do_pop())
image = self.do_pop()
self.push(image.crop((left, upper, right, lower)))
def do_draft(self):
"""usage: draft <string:mode> <int:xsize> <int:ysize>
Configure the loader for a given mode and size.
"""
mode = self.do_pop()
xsize = int(self.do_pop())
ysize = int(self.do_pop())
self.push(self.draft(mode, (xsize, ysize)))
def do_filter(self):
"""usage: filter <string:filtername> <image:pic1>
Process the top image with the given filter.
"""
from PIL import ImageFilter
imageFilter = getattr(ImageFilter, self.do_pop().upper())
image = self.do_pop()
self.push(image.filter(imageFilter))
def do_getbbox(self):
"""usage: getbbox
Push left, upper, right, and lower pixel coordinates of the top image.
"""
bounding_box = self.do_pop().getbbox()
self.push(bounding_box[3])
self.push(bounding_box[2])
self.push(bounding_box[1])
self.push(bounding_box[0])
def do_getextrema(self):
    """usage: getextrema
    Push minimum and maximum pixel values of the top image.
    """
    # Bug fix: PIL's Image API spells this getextrema(); .extrema() does
    # not exist and raised AttributeError at runtime. The docstring also
    # advertised the command as "extrema" while dispatch requires the
    # token "getextrema" (funcname = "do_" + top).
    extrema = self.do_pop().getextrema()
    # Push max first so the minimum ends up on top of the stack.
    self.push(extrema[1])
    self.push(extrema[0])
def do_offset(self):
"""usage: offset <int:xoffset> <int:yoffset> <image:pic1>
Offset the pixels in the top image.
"""
xoff = int(self.do_pop())
yoff = int(self.do_pop())
image = self.do_pop()
self.push(image.offset(xoff, yoff))
def do_paste(self):
"""usage: paste <image:figure> <int:xoffset> <int:yoffset>
<image:ground>
Paste figure image into ground with upper left at given offsets.
"""
figure = self.do_pop()
xoff = int(self.do_pop())
yoff = int(self.do_pop())
ground = self.do_pop()
if figure.mode == "RGBA":
ground.paste(figure, (xoff, yoff), figure)
else:
ground.paste(figure, (xoff, yoff))
self.push(ground)
def do_resize(self):
"""usage: resize <int:xsize> <int:ysize> <image:pic1>
Resize the top image.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
image = self.do_pop()
self.push(image.resize((xsize, ysize)))
def do_rotate(self):
"""usage: rotate <int:angle> <image:pic1>
Rotate image through a given angle
"""
angle = int(self.do_pop())
image = self.do_pop()
self.push(image.rotate(angle))
def do_save(self):
"""usage: save <string:filename> <image:pic1>
Save image with default options.
"""
filename = self.do_pop()
image = self.do_pop()
image.save(filename)
def do_save2(self):
"""usage: save2 <string:filename> <string:options> <image:pic1>
Save image with specified options.
"""
filename = self.do_pop()
options = self.do_pop()
image = self.do_pop()
image.save(filename, None, options)
def do_show(self):
"""usage: show <image:pic1>
Display and pop the top image.
"""
self.do_pop().show()
def do_thumbnail(self):
"""usage: thumbnail <int:xsize> <int:ysize> <image:pic1>
Modify the top image in the stack to contain a thumbnail of itself.
"""
ysize = int(self.do_pop())
xsize = int(self.do_pop())
self.top().thumbnail((xsize, ysize))
def do_transpose(self):
"""usage: transpose <string:operator> <image:pic1>
Transpose the top image.
"""
transpose = self.do_pop().upper()
image = self.do_pop()
self.push(image.transpose(transpose))
# Image attributes
def do_format(self):
"""usage: format <image:pic1>
Push the format of the top image onto the stack.
"""
self.push(self.do_pop().format)
def do_mode(self):
"""usage: mode <image:pic1>
Push the mode of the top image onto the stack.
"""
self.push(self.do_pop().mode)
def do_size(self):
"""usage: size <image:pic1>
Push the image size on the stack as (y, x).
"""
size = self.do_pop().size
self.push(size[0])
self.push(size[1])
# ImageChops operations
def do_invert(self):
"""usage: invert <image:pic1>
Invert the top image.
"""
from PIL import ImageChops
self.push(ImageChops.invert(self.do_pop()))
def do_lighter(self):
"""usage: lighter <image:pic1> <image:pic2>
Pop the two top images, push an image of the lighter pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.lighter(image1, image2))
def do_darker(self):
"""usage: darker <image:pic1> <image:pic2>
Pop the two top images, push an image of the darker pixels of both.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.darker(image1, image2))
def do_difference(self):
"""usage: difference <image:pic1> <image:pic2>
Pop the two top images, push the difference image
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.difference(image1, image2))
def do_multiply(self):
"""usage: multiply <image:pic1> <image:pic2>
Pop the two top images, push the multiplication image.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
self.push(ImageChops.multiply(image1, image2))
def do_screen(self):
"""usage: screen <image:pic1> <image:pic2>
Pop the two top images, superimpose their inverted versions.
"""
from PIL import ImageChops
image2 = self.do_pop()
image1 = self.do_pop()
self.push(ImageChops.screen(image1, image2))
def do_add(self):
"""usage: add <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled sum with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.add(image1, image2, scale, offset))
def do_subtract(self):
"""usage: subtract <image:pic1> <image:pic2> <int:offset> <float:scale>
Pop the two top images, produce the scaled difference with offset.
"""
from PIL import ImageChops
image1 = self.do_pop()
image2 = self.do_pop()
scale = float(self.do_pop())
offset = int(self.do_pop())
self.push(ImageChops.subtract(image1, image2, scale, offset))
# ImageEnhance classes
def do_color(self):
    """usage: color <float:factor> <image:pic1>
    Enhance color in the top image by the given factor.
    """
    from PIL import ImageEnhance
    # The factor is popped before the image: factor is on top of the stack.
    factor = float(self.do_pop())
    image = self.do_pop()
    enhancer = ImageEnhance.Color(image)
    self.push(enhancer.enhance(factor))

def do_contrast(self):
    """usage: contrast <float:factor> <image:pic1>
    Enhance contrast in the top image by the given factor.
    """
    from PIL import ImageEnhance
    factor = float(self.do_pop())
    image = self.do_pop()
    enhancer = ImageEnhance.Contrast(image)
    self.push(enhancer.enhance(factor))

def do_brightness(self):
    """usage: brightness <float:factor> <image:pic1>
    Enhance brightness in the top image by the given factor.
    """
    from PIL import ImageEnhance
    factor = float(self.do_pop())
    image = self.do_pop()
    enhancer = ImageEnhance.Brightness(image)
    self.push(enhancer.enhance(factor))

def do_sharpness(self):
    """usage: sharpness <float:factor> <image:pic1>
    Enhance sharpness in the top image by the given factor.
    """
    from PIL import ImageEnhance
    factor = float(self.do_pop())
    image = self.do_pop()
    enhancer = ImageEnhance.Sharpness(image)
    self.push(enhancer.enhance(factor))
# The interpreter loop
def execute(self, list):
    """Interpret a list of PILDriver commands.

    Tokens are processed from last to first: each token is pushed on the
    stack, and whenever the top of stack is a string naming a do_* method
    that token is popped and the method is invoked.

    NOTE: the parameter name shadows the builtin ``list``; it is kept for
    backward compatibility with existing callers.
    """
    # Fix: iterate a reversed copy instead of list.reverse() + repeated
    # re-slicing. The original mutated the caller's list in place and
    # rebuilt the remainder with O(n^2) ``list = list[1:]`` copies.
    for token in reversed(list):
        self.push(token)
        if self.verbose:
            print("Stack: " + repr(self.stack))
        top = self.top()
        # Only string tokens can name commands.
        if not isinstance(top, str):
            continue
        funcname = "do_" + top
        if hasattr(self, funcname):
            # Consume the command token itself, then dispatch.
            self.do_pop()
            getattr(self, funcname)()
if __name__ == '__main__':
    import sys

    # If we see command-line arguments, interpret them as a stack state
    # and execute. Otherwise go interactive.
    driver = PILDriver()
    if len(sys.argv[1:]) > 0:
        driver.execute(sys.argv[1:])
    else:
        print("PILDriver says hello.")
        while True:
            try:
                # raw_input() was renamed input() in Python 3; support both.
                if sys.version_info[0] >= 3:
                    line = input('pildriver> ')
                else:
                    line = raw_input('pildriver> ')
            except EOFError:
                print("\nPILDriver says goodbye.")
                break
            # Tokenize on whitespace and run; show the stack after each line.
            driver.execute(line.split())
            print(driver.stack)
# The following sets edit modes for GNU EMACS
# Local Variables:
# mode:python
# End:
|
|
#!/usr/bin/env python
import os
import tempfile
import unittest
import time
import math
import re
import platform
import uuid
import pwd
import signal
from functools import partial
from multiprocessing import Process, Event
from ast import literal_eval
import psutil
from thrift.protocol import TBinaryProtocol
from plow.rndaemon import conf
conf.NETWORK_DISABLED = True
from plow.rndaemon.rpc import ttypes, RndServiceApi
from plow.rndaemon import core, server, client, utils
import logging
logging.basicConfig(level=logging.DEBUG)
conf.TASK_PROXY_USER = os.getenv('PLOW_PROXY_USER', conf.TASK_PROXY_USER)
ROOT = os.path.abspath(os.path.dirname(__file__))
CMDS_UTIL = os.path.join(ROOT, 'utils/cmds.py')
DATA_DIR = os.path.join(ROOT, 'data')
IS_LINUX = platform.system() in ('FreeBSD', 'Linux')
class TestResourceManager(unittest.TestCase):
    """Tests for core.ResourceMgr slot checkout/checkin bookkeeping."""

    def tearDown(self):
        # Visual separator between test outputs (file is Python 2:
        # print statements and xrange are used throughout).
        print "\n"
        print "="*60, "\n"

    def testCoreCheckout(self):
        """Check out every slot one at a time, then check them all back in."""
        manager = core.ResourceMgr
        totalCores = core.Profiler.physicalCpus
        # Before any checkout: one slot per physical CPU, all open.
        slots = len(manager.getSlots())
        self.assertEqual(totalCores, slots)
        slots = len(manager.getOpenSlots())
        self.assertEqual(totalCores, slots)
        slots = []
        # Each single-core checkout should reduce the open count by one.
        for i in xrange(1, totalCores + 1):
            slots += manager.checkout(1)
            total = totalCores - i
            openslots = len(manager.getOpenSlots())
            self.assertEqual(total, openslots)
        # Checking everything back in restores the full open count.
        manager.checkin(slots)
        openslots = len(manager.getOpenSlots())
        self.assertEqual(totalCores, openslots)
class TestProcessManager(unittest.TestCase):
_logdir = tempfile.gettempdir()
_totalCores = core.Profiler.physicalCpus
def setUp(self):
self._logfile = tempfile.mktemp('.log', 'plow-test-')
self._processmgr_processFinished = core.ProcessMgr.processFinished
def tearDown(self):
# give these types of tests a moment to close down
time.sleep(1)
core.ProcessMgr.processFinished = self._processmgr_processFinished
print "\n"
print "="*60, "\n"
#
# Tests
#
def testRunTaskCommand(self):
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'cpu_affinity']
core.ProcessMgr.runProcess(process)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
sig, status = self.getLogSignalStatus(process.logFile)
self.assertEqual(status, 0, "Expected a 0 Exit Status, but got %s" % status)
self.cpuAffinityTestUtil(process)
def testRunTaskCommandHalfCores(self):
if self._totalCores < 3:
return
cores = int(math.ceil(self._totalCores * .5))
process = self.getNewTaskCommand()
process.cores = cores
process.command = [CMDS_UTIL, 'cpu_affinity']
core.ProcessMgr.runProcess(process)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
self.cpuAffinityTestUtil(process)
def testRunTaskCommandMaxCores(self):
process = self.getNewTaskCommand()
process.cores = self._totalCores
process.command = [CMDS_UTIL, 'cpu_affinity']
core.ProcessMgr.runProcess(process)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
self.cpuAffinityTestUtil(process)
def testRunTaskCommandOutOfCores(self):
process = self.getNewTaskCommand()
process.cores = self._totalCores + 1
process.command = ["/bin/ls", self._logdir]
self.assertRaises(ttypes.RndException, core.ProcessMgr.runProcess, process)
def testKillRunningTask(self):
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'hard_to_kill']
core.ProcessMgr.runProcess(process)
time.sleep(1)
runningTasks = core.ProcessMgr.getRunningTasks()
total = len(runningTasks)
self.assertEqual(total, 1, msg="Expected there to be one running task")
task = runningTasks[0]
core.ProcessMgr.killRunningTask(task.procId, "Killing for testing reasons")
time.sleep(1)
count = len(core.ProcessMgr.getRunningTasks())
self.assertEqual(count, 0,
msg="Expected 0 running tasks but got %s" % count)
i = 0
while core.ProcessMgr.getRunningTasks():
time.sleep(.5)
self.assertTrue(i < 10,
"Tasks are still running when they should be dead by now")
i += 1
sig, status = self.getLogSignalStatus(process.logFile)
self.assertEqual(status, 1, "Expected a 0 Exit Status, but got %s" % status)
assert abs(sig) in (signal.SIGTERM, signal.SIGKILL), "Expected a 9 or 15 Exit Signal, but got %s" % sig
def testFailedTask(self):
D = {'result': None}
def processFinished(d, *args):
d['result'] = args[0]
self._processmgr_processFinished(*args)
core.ProcessMgr.processFinished = partial(processFinished, D)
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'crashing']
task = core.ProcessMgr.runProcess(process, wait=5)
ppid = task.pid
# self.assertNotEqual(ppid, -1, "Procss never started properly")
try:
psutil.Process(ppid).wait(5)
except psutil.TimeoutExpired:
self.fail("Task should not still be running: %s" % task)
except psutil.NoSuchProcess:
pass
i = 0
while core.ProcessMgr.getRunningTasks():
time.sleep(.5)
self.assertTrue(i < 10,
"Tasks are still running when they should be dead by now")
i += 1
sig, status = self.getLogSignalStatus(process.logFile)
self.assertEqual(sig, 0)
self.assertEqual(status, 1)
self.assertTrue(D['result'] is not None, "Result was %r" % D)
self.assertEqual(D['result'].exitStatus, 1)
self.assertEqual(D['result'].exitSignal, 0)
def testTaskProgress(self):
# disable the callback
D = {'result': None}
def processFinished(d, *args):
d['result'] = args[0]
conf.TASK_PROGRESS_PATTERNS = {
'blender': '^Fra:\\d+ .*? \\| Rendering \\| .*? (\\d+/\\d+)$',
'mray': '^JOB[\\w. ]+:\\s+([\\d.]+%)\\s+'
}
core.ProcessMgr.processFinished = partial(processFinished, D)
process = self.getNewTaskCommand()
process.taskTypes = ['blender', 'mray']
for log in ('blender.log', 'mentalRay.log'):
process.command = [CMDS_UTIL, 'echo_log', os.path.join(DATA_DIR, log)]
t = core._ProcessThread(process, cpus=[0])
running = t.getRunningTask()
self.assertEqual(running.progress, 0,
'Initial progress for "%s" job should be 0' % log)
running.lastLog = None
repr(running)
t.start()
t.join()
self.assertTrue(D['result'] is not None)
self.assertEqual(D['result'].exitStatus, 0)
running = t.getRunningTask()
self.assertEqual(running.progress, 1,
'Final progress for "%s" job should be 1. Got %s' \
% (log, running.progress))
D['result'] = None
def testTaskShutdown(self):
procs = []
for slot in core.ResourceMgr.getOpenSlots():
process = self.getNewTaskCommand()
process.command = [CMDS_UTIL, 'hard_to_kill']
core.ProcessMgr.runProcess(process, wait=1)
procs.append(process)
time.sleep(1)
core.ProcessMgr.shutdown()
for proc in procs:
sig, status = self.getLogSignalStatus(proc.logFile)
self.assertEqual(status, 1, "Expected 1 Exit Status, but got %s" % status)
self.assertEqual(sig, 86, "Expected 86 Exit Signal, but got %s" % sig)
def testPingPong(self):
process = self.getNewTaskCommand()
process.command = ["sleep", ".25"]
core.ProcessMgr.runProcess(process)
handler = server.RndProcessHandler()
ping = handler.pingPong()
self.assertFalse(ping.tasks, "Expected and empty task list")
ping = handler.pingPong(withTasks=True)
self.assertTrue(ping.tasks, "Expected to find a running task in ping result")
logging.debug("PingPong: %r", ping)
while core.ProcessMgr.getRunningTasks():
time.sleep(.1)
#
# Utils
#
def getNewTaskCommand(self):
process = ttypes.RunTaskCommand()
process.procId = uuid.uuid4()
process.taskId = uuid.uuid4()
process.cores = 1
process.uid = os.geteuid()
process.username = pwd.getpwuid(process.uid).pw_name
process.env = {}
process.logFile = self._logfile
return process
def cpuAffinityTestUtil(self, process):
captured_affinity = tuple(self.getLogCpuAffinity(process.logFile))
count = len(captured_affinity)
self.assertTrue(count == 1, "Expected only 1 result. Got %d" % count)
if IS_LINUX:
captured = captured_affinity[0]
cpu_set = set()
logical_cpus = core.Profiler.cpuprofile.logical_cpus
for i in xrange(process.cores):
cpu_set.update(logical_cpus[i])
cpu_tuple = tuple(cpu_set)
self.assertEqual(captured, cpu_tuple,
'Captured cpu affinity %s does not match expected %s' % (cpu_tuple, captured))
@staticmethod
def getLogSignalStatus(logfile):
status = None
signal = None
status_field = 'Exit Status:'
signal_field = 'Signal:'
with open(logfile) as f:
for line in f:
if line.startswith(status_field):
try:
status = int(line.split(status_field, 1)[-1])
except:
pass
elif line.startswith(signal_field):
try:
signal = int(line.split(signal_field, 1)[-1])
except:
pass
return signal, status
@staticmethod
def getLogCpuAffinity(logfile):
affinity = set()
with open(logfile) as f:
for line in f:
match = re.search(r'cpu_affinity == (\([\d, ]+\))', line)
if match:
try:
cpus = literal_eval(match.group(1))
except:
continue
affinity.add(cpus)
return affinity
class TestCommunications(unittest.TestCase):
    """
    Creates a mock server to accept communication tests
    from the client API
    """

    def setUp(self):
        # Spin up a throwaway thrift server in a child process so the
        # client API under test has a real endpoint to talk to.
        self.event = Event()
        self.server_port = 21212
        handler = _ServiceHandler(self.event)
        prot = TBinaryProtocol.TBinaryProtocolAcceleratedFactory()
        self.server = server.get_server(RndServiceApi,
                                        handler,
                                        self.server_port,
                                        protocol=prot)
        self.t_server = Process(target=self.server.serve)
        self.t_server.daemon = True
        self.t_server.start()
        # Give the child process a moment to start listening.
        time.sleep(.1)

    def tearDown(self):
        self.t_server.terminate()
        time.sleep(1)
        print "\n"
        print "="*60, "\n"

    def testSendPing(self):
        """
        Get a connection to the local "server" and test
        that it receives a ping.
        """
        ping = ttypes.Ping()
        ping.hw = ttypes.Hardware()
        service, transport = client.getPlowConnection("localhost", self.server_port)
        service.sendPing(ping)
        # _ServiceHandler sets the event when the ping arrives; wait up to 3s.
        self.event.wait(3)
        self.assertTrue(
            self.event.is_set(),
            msg="Server did not receive ping from client in reasonable time")
        transport.close()
class _ServiceHandler(object):
    """Minimal RndServiceApi handler that records that a ping arrived."""

    def __init__(self, evt):
        # Event shared with the test process; set on first ping.
        self.event = evt

    def sendPing(self, ping):
        # Signal the waiting test that the client's ping reached the server.
        self.event.set()
class TestLogParser(unittest.TestCase):
    """Tests for utils.ProcessLogParser progress extraction."""

    def testProgressStatic(self):
        """Parse known render logs and verify progress counts and values."""
        parser = utils.ProcessLogParser([
            '^Fra:\d+ .*? \| Rendering \| .*? (\d+/\d+)$',
            '^JOB[\w. ]+:\s+([\d.]+%)\s+'])

        # Expected number of progress updates per log, plus spot checks:
        # (index into the captured list, expected progress fraction).
        logtests = {
            'blender.log': {
                'total': 42,
                'indexes': [(0, 0.0), (5, .4375), (20, .671875), (30, .828125), (-1, 1.0)]
            },
            'mentalRay.log': {
                'total': 300,
                'indexes': [(0, .003), (20, .07), (50, .17), (150, .503), (250, .836), (-1, 1.0)]
            }
        }

        for name, attribs in logtests.iteritems():
            log = os.path.join(DATA_DIR, name)
            progs = []
            # Fix: use a context manager so the log file handle is closed
            # deterministically (the original `for line in open(log)` leaked it).
            with open(log) as fh:
                for line in fh:
                    val = parser.parseProgress(line)
                    if val is not None:
                        progs.append(val)

            total = attribs['total']
            found = len(progs)
            self.assertEqual(found, total, "Expected %d progress updates. Got %d" % (total, found))

            for idx, val in attribs['indexes']:
                self.assertEqual(progs[idx], val)
if __name__ == "__main__":
suite = unittest.TestSuite()
for t in (TestCommunications, TestResourceManager, TestProcessManager):
suite.addTest(unittest.TestLoader().loadTestsFromTestCase(t))
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
import synapseclient
from synapseclient import File, Project, Folder, Table, Schema, Link, Wiki, Entity, Activity
from synapseclient.core.cache import Cache
from synapseclient.core.exceptions import SynapseHTTPError
from synapseclient.core.constants.limits import MAX_FILE_HANDLE_PER_COPY_REQUEST
import re
import json
import itertools
import math
############################################################
# Copy Functions #
############################################################
def copyFileHandles(syn, fileHandles, associateObjectTypes, associateObjectIds,
                    newContentTypes=None, newFileNames=None):
    """
    Given a list of fileHandle Ids or Objects, copy the fileHandles

    :param syn:                    A Synapse object with user's login, e.g. syn = synapseclient.login()

    :param fileHandles:            List of fileHandle Ids or Objects

    :param associateObjectTypes:   List of associated object types: FileEntity, TableEntity, WikiAttachment,
                                   UserProfileAttachment, MessageAttachment, TeamAttachment, SubmissionAttachment,
                                   VerificationSubmission (Must be the same length as fileHandles)

    :param associateObjectIds:     List of associated object Ids: If copying a file, the objectId is the synapse id,
                                   and if copying a wiki attachment, the object id is the wiki subpage id.
                                   (Must be the same length as fileHandles)

    :param newContentTypes:        (Optional) List of content types. Set each item to a new content type for each file
                                   handle, or leave the item as None to keep the original content type. Default None,
                                   which keeps all original content types.

    :param newFileNames:           (Optional) List of filenames. Set each item to a new filename for each file handle,
                                   or leave the item as None to keep the original name. Default None, which keeps all
                                   original file names.

    :return:                       List of batch filehandle copy results, can include failureCodes: UNAUTHORIZED and
                                   NOT_FOUND

    :raises ValueError:            If length of all input arguments are not the same
    """

    # Check if length of all inputs are equal
    if not (len(fileHandles) == len(associateObjectTypes) == len(associateObjectIds)
            and (newContentTypes is None or len(newContentTypes) == len(associateObjectIds))
            and (newFileNames is None or len(newFileNames) == len(associateObjectIds))):
        raise ValueError("Length of all input arguments must be the same")

    # If no optional params passed, assign to empty list
    # (the batch helper zip_longest-fills missing items with None).
    if newContentTypes is None:
        newContentTypes = []
    if newFileNames is None:
        newFileNames = []

    # Remove this line if we change API to only take fileHandleIds and not Objects
    file_handle_ids = [synapseclient.core.utils.id_of(handle) for handle in fileHandles]

    # division logic for POST call here: the backend limits how many handles
    # one copy request may contain, so split into batches and POST each.
    master_copy_results_list = []  # list which holds all results from POST call
    for batch_file_handles_ids, batch_assoc_obj_types, batch_assoc_obj_ids, batch_con_type, batch_file_name \
            in _batch_iterator_generator([file_handle_ids, associateObjectTypes, associateObjectIds,
                                          newContentTypes, newFileNames], MAX_FILE_HANDLE_PER_COPY_REQUEST):
        batch_copy_results = _copy_file_handles_batch(syn, batch_file_handles_ids, batch_assoc_obj_types,
                                                      batch_assoc_obj_ids, batch_con_type, batch_file_name)
        master_copy_results_list.extend(batch_copy_results)

    return master_copy_results_list
def _copy_file_handles_batch(syn, file_handle_ids, obj_types, obj_ids, new_con_types, new_file_names):
    """
    Given a list of fileHandle Ids, copy the fileHandles. This helper makes the POST call and returns the
    results as a list.

    Fix: the first parameter of this module-level function was misleadingly
    named ``self``; it is a Synapse client instance, so it is named ``syn``
    to match the sibling functions. All callers pass it positionally.

    :param syn:              A Synapse object with user's login, e.g. syn = synapseclient.login()

    :param file_handle_ids:  List of fileHandle Ids or Objects

    :param obj_types:        List of associated object types: FileEntity, TableEntity, WikiAttachment,
                             UserProfileAttachment, MessageAttachment, TeamAttachment, SubmissionAttachment,
                             VerificationSubmission (Must be the same length as fileHandles)

    :param obj_ids:          List of associated object Ids: If copying a file, the objectId is the synapse id,
                             and if copying a wiki attachment, the object id is the wiki subpage id.
                             (Must be the same length as fileHandles)

    :param new_con_types:    (Optional) List of content types (Can change a filetype of a filehandle).

    :param new_file_names:   (Optional) List of filenames (Can change a filename of a filehandle).

    :return:                 List of batch filehandle copy results, can include failureCodes: UNAUTHORIZED and
                             NOT_FOUND
    """
    copy_file_handle_request = _create_batch_file_handle_copy_request(file_handle_ids, obj_types, obj_ids,
                                                                      new_con_types, new_file_names)
    # make backend call which performs the copy specified by copy_file_handle_request
    copied_file_handles = syn.restPOST('/filehandles/copy', body=json.dumps(copy_file_handle_request),
                                       endpoint=syn.fileHandleEndpoint)
    return copied_file_handles.get("copyResults")
def _create_batch_file_handle_copy_request(file_handle_ids, obj_types, obj_ids, new_con_types, new_file_names):
"""
Returns json for file handle copy request
:param file_handle_ids: List of fileHandle Ids
:param obj_types: List of associated object types: FileEntity, TableEntity, WikiAttachment,
UserProfileAttachment, MessageAttachment, TeamAttachment, SubmissionAttachment,
VerificationSubmission (Must be the same length as fileHandles)
:param obj_ids: List of associated object Ids: If copying a file, the objectId is the synapse id,
and if copying a wiki attachment, the object id is the wiki subpage id.
(Must be the same length as fileHandles)
:param new_con_types: List of content types (Can change a filetype of a filehandle).
:param new_file_names: List of filenames (Can change a filename of a filehandle).
:return: JSON for API call to POST/ filehandles/ copy
"""
copy_file_handle_request = {"copyRequests": []}
for file_handle_id, obj_type, obj_id, new_con_type, new_file_name \
in itertools.zip_longest(file_handle_ids, obj_types, obj_ids, new_con_types, new_file_names):
# construct JSON object for REST call
curr_dict = {
"originalFile": {
"fileHandleId": file_handle_id,
"associateObjectId": obj_id,
"associateObjectType": obj_type
},
"newContentType": new_con_type,
"newFileName": new_file_name
}
# add copy request to list of requests
copy_file_handle_request["copyRequests"].append(curr_dict)
return copy_file_handle_request
def _batch_iterator_generator(iterables, batch_size):
"""
Returns a generator over each of the iterable objects in the list iterables
:param iterables: List of iterable objects, all must be same length, len(iterables) >= 1
:param batch_size: Integer representing the size of the batch, batch_size >= 1
:return: Generator which yields a list of batches for each iterable in iterables
:raises ValueError: If len(iterables) < 1
example: _batch_iterator_generator(["ABCDEFG"], 3) --> ["ABC"] ["DEF"] ["G"], on successive calls to next()
_batch_iterator_generator([[1, 2, 3], [4, 5, 6]], 2) --> [[1, 2], [4, 5]] [[3], [6]]
"""
if len(iterables) < 1:
raise ValueError("Must provide at least one iterable in iterables, i.e. len(iterables) >= 1")
num_batches = math.ceil(len(iterables[0]) / batch_size)
for i in range(num_batches):
start = i * batch_size
end = start + batch_size
yield [iterables[i][start:end] for i in range(len(iterables))]
def _copy_cached_file_handles(cache, copiedFileHandles):
# type: (Cache , dict) -> None
for copy_result in copiedFileHandles:
if copy_result.get('failureCode') is None: # sucessfully copied
original_cache_path = cache.get(copy_result['originalFileHandleId'])
if original_cache_path:
cache.add(copy_result['newFileHandle']['id'], original_cache_path)
def changeFileMetaData(syn, entity, downloadAs=None, contentType=None):
    """
    Change the filename and/or content type of a Synapse file entity.

    :param syn:         A Synapse object with user's login, e.g. syn = synapseclient.login()
    :param entity:      Synapse entity Id or object
    :param contentType: Specify content type to change the content type of a filehandle
    :param downloadAs:  Specify filename to change the filename of a filehandle

    :return:            Synapse Entity

    :raises ValueError: If the file handle copy fails (e.g. UNAUTHORIZED or NOT_FOUND)

    Can be used to change the filename or the file content-type without downloading::

        file_entity = syn.get(synid)
        print(os.path.basename(file_entity.path))  ## prints, e.g., "my_file.txt"
        file_entity = synapseutils.changeFileMetaData(syn, file_entity, "my_new_name_file.txt")
    """
    ent = syn.get(entity, downloadFile=False)
    fileResult = syn._getFileHandleDownload(ent.dataFileHandleId, ent.id)
    # Keep the existing content type unless the caller supplied a new one.
    ent.contentType = ent.contentType if contentType is None else contentType
    # Default the new filename to the one currently recorded on the handle.
    downloadAs = fileResult['fileHandle']['fileName'] if downloadAs is None else downloadAs
    copiedFileHandle = copyFileHandles(syn, [ent.dataFileHandleId], [ent.concreteType.split(".")[-1]], [ent.id],
                                       [contentType], [downloadAs])
    copyResult = copiedFileHandle[0]
    if copyResult.get("failureCode") is not None:
        raise ValueError("%s dataFileHandleId: %s" % (copyResult["failureCode"], copyResult['originalFileHandleId']))
    # Point the entity at the freshly copied handle and persist it.
    ent.dataFileHandleId = copyResult['newFileHandle']['id']
    ent = syn.store(ent)
    return ent
def copy(syn, entity, destinationId, skipCopyWikiPage=False, skipCopyAnnotations=False, **kwargs):
    """
    - This function will assist users in copying entities (Tables, Links, Files, Folders, Projects),
      and will recursively copy everything in directories.
    - A Mapping of the old entities to the new entities will be created and all the wikis of each entity
      will also be copied over and links to synapse Ids will be updated.

    :param syn:                 A synapse object: syn = synapseclient.login()- Must be logged into synapse

    :param entity:              A synapse entity ID

    :param destinationId:       Synapse ID of a folder/project that the copied entity is being copied to

    :param skipCopyWikiPage:    Skip copying the wiki pages
                                Default is False

    :param skipCopyAnnotations: Skips copying the annotations
                                Default is False

    Examples::
        import synapseutils
        import synapseclient
        syn = synapseclient.login()
        synapseutils.copy(syn, ...)

    Examples and extra parameters unique to each copy function
    -- COPYING FILES

    :param version:             Can specify version of a file.
                                Default to None

    :param updateExisting:      When the destination has an entity that has the same name,
                                users can choose to update that entity.
                                It must be the same entity type
                                Default to False

    :param setProvenance:       Has three values to set the provenance of the copied entity:
                                    traceback: Sets to the source entity
                                    existing: Sets to source entity's original provenance (if it exists)
                                    None: No provenance is set

    Examples::
        synapseutils.copy(syn, "syn12345", "syn45678", updateExisting=False, setProvenance = "traceback",version=None)

    -- COPYING FOLDERS/PROJECTS

    :param excludeTypes:        Accepts a list of entity types (file, table, link) which determines which entity types to
                                not copy.
                                Defaults to an empty list.

    Examples::
        #This will copy everything in the project into the destinationId except files and tables.
        synapseutils.copy(syn, "syn123450","syn345678",excludeTypes=["file","table"])

    :returns: a mapping between the original and copied entity: {'syn1234':'syn33455'}
    """
    updateLinks = kwargs.get('updateLinks', True)
    updateSynIds = kwargs.get('updateSynIds', True)
    entitySubPageId = kwargs.get('entitySubPageId', None)
    destinationSubPageId = kwargs.get('destinationSubPageId', None)

    # Copy the entity tree first so the old->new id mapping is complete
    # before any wiki links are rewritten.
    mapping = _copyRecursive(syn, entity, destinationId, skipCopyAnnotations=skipCopyAnnotations, **kwargs)
    if not skipCopyWikiPage:
        for oldEnt in mapping:
            copyWiki(syn, oldEnt, mapping[oldEnt], entitySubPageId=entitySubPageId,
                     destinationSubPageId=destinationSubPageId, updateLinks=updateLinks,
                     updateSynIds=updateSynIds, entityMap=mapping)
    return mapping
def _copyRecursive(syn, entity, destinationId, mapping=None, skipCopyAnnotations=False, **kwargs):
    """
    Recursively copies synapse entites, but does not copy the wikis

    :param syn:                 A synapse object: syn = synapseclient.login()- Must be logged into synapse

    :param entity:              A synapse entity ID

    :param destinationId:       Synapse ID of a folder/project that the copied entity is being copied to

    :param mapping:             Existing {original id: copied id} mapping, threaded through recursive calls

    :param skipCopyAnnotations: Skips copying the annotations
                                Default is False

    :returns: a mapping between the original and copied entity: {'syn1234':'syn33455'}
    """
    version = kwargs.get('version', None)
    setProvenance = kwargs.get('setProvenance', "traceback")
    excludeTypes = kwargs.get('excludeTypes', [])
    updateExisting = kwargs.get('updateExisting', False)
    if mapping is None:
        mapping = dict()
    # Check that passed in excludeTypes is file, table, and link
    if not isinstance(excludeTypes, list):
        raise ValueError("Excluded types must be a list")
    elif not all([i in ["file", "link", "table"] for i in excludeTypes]):
        raise ValueError("Excluded types can only be a list of these values: file, table, and link")

    ent = syn.get(entity, downloadFile=False)
    if ent.id == destinationId:
        raise ValueError("destinationId cannot be the same as entity id")

    # Fixed typo in error message ("project of folder" -> "project or folder")
    # and collapsed the double isinstance into a tuple check.
    if isinstance(ent, (Project, Folder)) and version is not None:
        raise ValueError("Cannot specify version when copying a project or folder")

    if not isinstance(ent, (Project, Folder, File, Link, Schema, Entity)):
        raise ValueError("Not able to copy this type of file")

    permissions = syn.restGET("/entity/{}/permissions".format(ent.id))
    # Don't copy entities without DOWNLOAD permissions
    if not permissions['canDownload']:
        print("%s not copied - this file lacks download permission" % ent.id)
        return mapping

    access_requirements = syn.restGET('/entity/{}/accessRequirement'.format(ent.id))
    # If there are any access requirements, don't copy files
    if access_requirements['results']:
        print("{} not copied - this file has access restrictions".format(ent.id))
        return mapping

    copiedId = None

    if isinstance(ent, Project):
        if not isinstance(syn.get(destinationId), Project):
            raise ValueError("You must give a destinationId of a new project to copy projects")
        copiedId = destinationId
        # Projects include Docker repos, and Docker repos cannot be copied
        # with the Synapse rest API. Entity views currently also aren't
        # supported
        entities = syn.getChildren(entity, includeTypes=['folder', 'file',
                                                         'table', 'link'])
        for i in entities:
            mapping = _copyRecursive(syn, i['id'], destinationId, mapping=mapping,
                                     skipCopyAnnotations=skipCopyAnnotations, **kwargs)
    elif isinstance(ent, Folder):
        copiedId = _copyFolder(syn, ent.id, destinationId, mapping=mapping, skipCopyAnnotations=skipCopyAnnotations,
                               **kwargs)
    elif isinstance(ent, File) and "file" not in excludeTypes:
        copiedId = _copyFile(syn, ent.id, destinationId, version=version, updateExisting=updateExisting,
                             setProvenance=setProvenance, skipCopyAnnotations=skipCopyAnnotations)
    elif isinstance(ent, Link) and "link" not in excludeTypes:
        copiedId = _copyLink(syn, ent.id, destinationId, updateExisting=updateExisting)
    elif isinstance(ent, Schema) and "table" not in excludeTypes:
        copiedId = _copyTable(syn, ent.id, destinationId, updateExisting=updateExisting)

    # This is currently done because copyLink returns None sometimes
    if copiedId is not None:
        mapping[ent.id] = copiedId
        print("Copied %s to %s" % (ent.id, copiedId))
    else:
        print("%s not copied" % ent.id)
    return mapping
def _copyFolder(syn, entity, destinationId, mapping=None, skipCopyAnnotations=False, **kwargs):
    """
    Copy a Folder entity and, recursively, everything inside it.

    :param entity: A synapse ID of a Folder entity
    :param destinationId: Synapse ID of a project/folder that the folder wants to be copied to
    :param skipCopyAnnotations: Skips copying the annotations
                                Default is False
    :returns: Synapse ID of the newly created folder
    """
    source = syn.get(entity)
    overwrite_ok = kwargs.get('updateExisting', False)
    mapping = dict() if mapping is None else mapping
    # Unless updating is allowed, refuse to copy over an existing name.
    if not overwrite_ok:
        if syn.findEntityId(source.name, parent=destinationId) is not None:
            raise ValueError('An entity named "%s" already exists in this location. Folder could not be copied'
                             % source.name)
    folder_copy = Folder(name=source.name, parent=destinationId)
    if not skipCopyAnnotations:
        folder_copy.annotations = source.annotations
    folder_copy = syn.store(folder_copy)
    # Recurse into every child of the source folder.
    for child in syn.getChildren(entity):
        _copyRecursive(syn, child['id'], folder_copy.id, mapping,
                       skipCopyAnnotations=skipCopyAnnotations, **kwargs)
    return folder_copy.id
def _copyFile(syn, entity, destinationId, version=None, updateExisting=False, setProvenance="traceback",
              skipCopyAnnotations=False):
    """
    Copies most recent version of a file to a specified synapse ID.

    :param entity: A synapse ID of a File entity
    :param destinationId: Synapse ID of a folder/project that the file wants to be copied to
    :param version: Can specify version of a file.
                    Default to None
    :param updateExisting: Can choose to update files that have the same name
                           Default to False
    :param setProvenance: Has three values to set the provenance of the copied entity:
                          traceback: Sets to the source entity
                          existing: Sets to source entity's original provenance (if it exists)
                          None: No provenance is set
    :param skipCopyAnnotations: Skips copying the annotations
                                Default is False
    :returns: Synapse ID of the stored file copy
    """
    # Fetch metadata only (downloadFile=False); followLink=False so a Link is
    # not silently resolved to its target here.
    ent = syn.get(entity, downloadFile=False, version=version, followLink=False)
    # CHECK: If File is in the same parent directory (throw an error) (Can choose to update files)
    if not updateExisting:
        existingEntity = syn.findEntityId(ent.name, parent=destinationId)
        if existingEntity is not None:
            raise ValueError('An entity named "%s" already exists in this location. File could not be copied'
                             % ent.name)
    profile = syn.getUserProfile()
    # get provenance earlier to prevent errors from being called in the end
    # If traceback, set activity to old entity
    if setProvenance == "traceback":
        act = Activity("Copied file", used=ent)
    # if existing, check if provenance exists
    elif setProvenance == "existing":
        try:
            act = syn.getProvenance(ent.id)
        except SynapseHTTPError as e:
            # 404 means the source simply has no provenance recorded.
            if e.response.status_code == 404:
                act = None
            else:
                raise e
    elif setProvenance is None or setProvenance.lower() == 'none':
        act = None
    else:
        raise ValueError('setProvenance must be one of None, existing, or traceback')
    # Grab entity bundle (entity plus its file handles) for the chosen version.
    bundle = syn._getEntityBundle(ent.id, version=ent.versionNumber, requestedObjects={'includeEntity': True,
                                                                                      'includeFileHandles': True})
    fileHandle = synapseclient.core.utils.find_data_file_handle(bundle)
    createdBy = fileHandle['createdBy']
    # CHECK: If the user created the file, copy the file by using fileHandleId else copy the fileHandle
    if profile.ownerId == createdBy:
        # Current user owns the file handle: re-use its id directly.
        newdataFileHandleId = ent.dataFileHandleId
    else:
        copiedFileHandle = copyFileHandles(syn, [fileHandle], ["FileEntity"], [bundle['entity']['id']],
                                           [fileHandle['contentType']], [fileHandle['fileName']])
        # Check if failurecodes exist
        copyResult = copiedFileHandle[0]
        if copyResult.get("failureCode") is not None:
            raise ValueError("%s dataFileHandleId: %s" % (copyResult["failureCode"],
                                                          copyResult['originalFileHandleId']))
        newdataFileHandleId = copyResult['newFileHandle']['id']
    new_ent = File(dataFileHandleId=newdataFileHandleId, name=ent.name, parentId=destinationId)
    # Set annotations here
    if not skipCopyAnnotations:
        new_ent.annotations = ent.annotations
    # Store provenance if act is not None
    if act is not None:
        new_ent = syn.store(new_ent, activity=act)
    else:
        new_ent = syn.store(new_ent)
    # Leave this return statement for test
    return new_ent['id']
def _copyTable(syn, entity, destinationId, updateExisting=False):
    """
    Copies synapse Tables

    :param entity: A synapse ID of Table Schema
    :param destinationId: Synapse ID of a project that the Table wants to be copied to
    :param updateExisting: Can choose to update files that have the same name
                           Default to False
    :returns: Synapse ID of the copied table's schema
    """
    print("Getting table %s" % entity)
    myTableSchema = syn.get(entity)
    # CHECK: If Table name already exists, raise value error
    # BUG FIX: the updateExisting parameter was accepted but ignored -- the
    # name-collision check now honors it, matching _copyFolder/_copyFile/_copyLink.
    if not updateExisting:
        existingEntity = syn.findEntityId(myTableSchema.name, parent=destinationId)
        if existingEntity is not None:
            raise ValueError('An entity named "%s" already exists in this location. Table could not be copied'
                             % myTableSchema.name)
    # Download all rows without row ids/versions so they can be re-uploaded
    # as brand-new rows in the destination table.
    d = syn.tableQuery('select * from %s' % myTableSchema.id, includeRowIdAndRowVersion=False)
    colIds = myTableSchema.columnIds
    newTableSchema = Schema(name=myTableSchema.name, parent=destinationId, columns=colIds)
    print("Created new table using schema %s" % newTableSchema.name)
    newTable = Table(schema=newTableSchema, values=d.filepath)
    newTable = syn.store(newTable)
    return newTable.schema.id
def _copyLink(syn, entity, destinationId, updateExisting=False):
    """
    Copy a Link entity to a new location.

    :param entity: A synapse ID of a Link entity
    :param destinationId: Synapse ID of a folder/project that the file wants to be copied to
    :param updateExisting: Can choose to update files that have the same name
                           Default to False
    :returns: Synapse ID of the new link, or None when the link target is gone
    """
    source = syn.get(entity)
    # Unless updating is allowed, refuse to copy over an existing name.
    if not updateExisting:
        if syn.findEntityId(source.name, parent=destinationId) is not None:
            raise ValueError('An entity named "%s" already exists in this location. Link could not be copied'
                             % source.name)
    link_copy = Link(source.linksTo['targetId'], parent=destinationId,
                     targetVersion=source.linksTo.get('targetVersionNumber'))
    try:
        return syn.store(link_copy).id
    except SynapseHTTPError as e:
        if e.response.status_code != 404:
            raise e
        # The link target was deleted: warn and signal "not copied".
        print("WARNING: The target of this link %s no longer exists" % source.id)
        return None
def _getSubWikiHeaders(wikiHeaders, subPageId, mapping=None):
"""
Function to assist in getting wiki headers of subwikipages
"""
subPageId = str(subPageId)
for i in wikiHeaders:
# This is for the first match
# If it isnt the actual parent, it will turn the first match into a parent node which will not have a parentId
if i['id'] == subPageId:
if mapping is None:
i.pop("parentId", None)
mapping = [i]
else:
mapping.append(i)
elif i.get('parentId') == subPageId:
mapping = _getSubWikiHeaders(wikiHeaders, subPageId=i['id'], mapping=mapping)
return mapping
def _updateSynIds(newWikis, wikiIdMap, entityMap):
print("Updating Synapse references:\n")
for oldWikiId in wikiIdMap.keys():
# go through each wiki page once more:
newWikiId = wikiIdMap[oldWikiId]
newWiki = newWikis[newWikiId]
print('Updated Synapse references for Page: %s\n' % newWikiId)
s = newWiki.markdown
for oldSynId in entityMap.keys():
# go through each wiki page once more:
newSynId = entityMap[oldSynId]
oldSynId = oldSynId + "\\b"
s = re.sub(oldSynId, newSynId, s)
print("Done updating Synapse IDs.\n")
newWikis[newWikiId].markdown = s
return newWikis
def _updateInternalLinks(newWikis, wikiIdMap, entity, destinationId):
print("Updating internal links:\n")
for oldWikiId in wikiIdMap.keys():
# go through each wiki page once more:
newWikiId = wikiIdMap[oldWikiId]
newWiki = newWikis[newWikiId]
print("\tUpdating internal links for Page: %s\n" % newWikiId)
s = newWiki["markdown"]
# in the markdown field, replace all occurrences of entity/wiki/abc with destinationId/wiki/xyz,
# where wikiIdMap maps abc->xyz
# replace <entity>/wiki/<oldWikiId> with <destinationId>/wiki/<newWikiId>
for oldWikiId2 in wikiIdMap.keys():
oldProjectAndWikiId = "%s/wiki/%s\\b" % (entity, oldWikiId2)
newProjectAndWikiId = "%s/wiki/%s" % (destinationId, wikiIdMap[oldWikiId2])
s = re.sub(oldProjectAndWikiId, newProjectAndWikiId, s)
# now replace any last references to entity with destinationId
s = re.sub(entity, destinationId, s)
newWikis[newWikiId].markdown = s
return newWikis
def copyWiki(syn, entity, destinationId, entitySubPageId=None, destinationSubPageId=None, updateLinks=True,
             updateSynIds=True, entityMap=None):
    """
    Copies wikis and updates internal links

    :param syn: A synapse object: syn = synapseclient.login()- Must be logged into synapse
    :param entity: A synapse ID of an entity whose wiki you want to copy
    :param destinationId: Synapse ID of a folder/project that the wiki wants to be copied to
    :param updateLinks: Update all the internal links. (e.g. syn1234/wiki/34345 becomes syn3345/wiki/49508)
                        Defaults to True
    :param updateSynIds: Update all the synapse ID's referenced in the wikis. (e.g. syn1234 becomes syn2345)
                         Defaults to True but needs an entityMap
    :param entityMap: An entity map {'oldSynId','newSynId'} to update the synapse IDs referenced in the
                      wiki.
                      Defaults to None
    :param entitySubPageId: Can specify subPageId and copy all of its subwikis
                            Defaults to None, which copies the entire wiki subPageId can be found:
                            https://www.synapse.org/#!Synapse:syn123/wiki/1234
                            In this case, 1234 is the subPageId.
    :param destinationSubPageId: Can specify destination subPageId to copy wikis to
                                 Defaults to None
    :returns: A list of Objects with three fields: id, title and parentId.
    """
    # Validate input parameters: sub-page IDs are normalized to integer strings.
    if entitySubPageId:
        entitySubPageId = str(int(entitySubPageId))
    if destinationSubPageId:
        destinationSubPageId = str(int(destinationSubPageId))
    oldOwn = syn.get(entity, downloadFile=False)
    # getWikiHeaders fails when there is no wiki
    try:
        oldWikiHeaders = syn.getWikiHeaders(oldOwn)
    except SynapseHTTPError as e:
        # No wiki on the source entity: nothing to copy.
        if e.response.status_code == 404:
            return []
        else:
            raise e
    newOwn = syn.get(destinationId, downloadFile=False)
    wikiIdMap = dict()  # old wiki page id -> new wiki page id
    newWikis = dict()   # new wiki page id -> stored Wiki object
    # If entitySubPageId is given but not destinationSubPageId, set the pageId to "" (will get the root page)
    # A entitySubPage could be copied to a project without any wiki pages, this has to be checked
    newWikiPage = None
    if destinationSubPageId:
        try:
            newWikiPage = syn.getWiki(newOwn, destinationSubPageId)
        except SynapseHTTPError as e:
            # Destination sub-page does not exist yet; a new one is created below.
            if e.response.status_code == 404:
                pass
            else:
                raise e
    if entitySubPageId:
        # Restrict the copy to the requested sub-page and its descendants.
        oldWikiHeaders = _getSubWikiHeaders(oldWikiHeaders, entitySubPageId)
    if not oldWikiHeaders:
        return []
    for wikiHeader in oldWikiHeaders:
        wiki = syn.getWiki(oldOwn, wikiHeader['id'])
        print('Got wiki %s' % wikiHeader['id'])
        if not wiki.get('attachmentFileHandleIds'):
            new_file_handles = []
        else:
            # Resolve each attachment's file handle for copying.
            results = [syn._getFileHandleDownload(filehandleId, wiki.id, objectType='WikiAttachment')
                       for filehandleId in wiki['attachmentFileHandleIds']]
            # Get rid of the previews
            nopreviews = [attach['fileHandle'] for attach in results
                          if not attach['fileHandle']['isPreview']]
            contentTypes = [attach['contentType'] for attach in nopreviews]
            fileNames = [attach['fileName'] for attach in nopreviews]
            copiedFileHandles = copyFileHandles(syn, nopreviews, ["WikiAttachment"]*len(nopreviews),
                                                [wiki.id]*len(nopreviews), contentTypes, fileNames)
            # Check if failurecodes exist
            for filehandle in copiedFileHandles:
                if filehandle.get("failureCode") is not None:
                    raise ValueError("%s dataFileHandleId: %s" % (filehandle["failureCode"],
                                                                  filehandle['originalFileHandleId']))
            new_file_handles = [filehandle['newFileHandle']['id'] for filehandle in copiedFileHandles]
        # for some reason some wikis don't have titles?
        # NOTE(review): hasattr() on wikiHeader only detects 'parentId' if the
        # headers are DictObject-style (keys exposed as attributes); a plain
        # dict would always take the else branch -- confirm the header type.
        if hasattr(wikiHeader, 'parentId'):
            # Non-root page: parent was already copied, so look up its new id.
            newWikiPage = Wiki(owner=newOwn, title=wiki.get('title', ''), markdown=wiki.markdown,
                               fileHandles=new_file_handles, parentWikiId=wikiIdMap[wiki.parentWikiId])
            newWikiPage = syn.store(newWikiPage)
        else:
            if destinationSubPageId is not None and newWikiPage is not None:
                # Overwrite the existing destination sub-page in place.
                newWikiPage["attachmentFileHandleIds"] = new_file_handles
                newWikiPage["markdown"] = wiki["markdown"]
                newWikiPage["title"] = wiki.get("title", "")
                # Need to add logic to update titles here
                newWikiPage = syn.store(newWikiPage)
            else:
                newWikiPage = Wiki(owner=newOwn, title=wiki.get("title", ""), markdown=wiki.markdown,
                                   fileHandles=new_file_handles, parentWikiId=destinationSubPageId)
                newWikiPage = syn.store(newWikiPage)
        newWikis[newWikiPage['id']] = newWikiPage
        wikiIdMap[wiki['id']] = newWikiPage['id']
    if updateLinks:
        newWikis = _updateInternalLinks(newWikis, wikiIdMap, entity, destinationId)
    if updateSynIds and entityMap is not None:
        newWikis = _updateSynIds(newWikis, wikiIdMap, entityMap)
    # Persist the link/ID rewrites performed above.
    print("Storing new Wikis\n")
    for oldWikiId in wikiIdMap.keys():
        newWikiId = wikiIdMap[oldWikiId]
        newWikis[newWikiId] = syn.store(newWikis[newWikiId])
        print("\tStored: %s\n" % newWikiId)
    return syn.getWikiHeaders(newOwn)
|
|
#!/usr/bin/env python
"""
Do windowed detection by classifying a number of images/crops at once,
optionally using the selective search window proposal method.
This implementation follows
Ross Girshick, Jeff Donahue, Trevor Darrell, Jitendra Malik.
Rich feature hierarchies for accurate object detection and semantic
segmentation.
http://arxiv.org/abs/1311.2524
The selective_search_ijcv_with_python code is available at
https://github.com/sergeyk/selective_search_ijcv_with_python
TODO:
- batch up image filenames as well: don't want to load all of them into memory
- refactor into class (without globals)
- get rid of imagenet mean file and just use mean pixel value
"""
import numpy as np
import pandas as pd
import os
import sys
import argparse
import time
import skimage.io
import skimage.transform
import selective_search_ijcv_with_python as selective_search
import caffe
# Module-level state populated by config() before any batches are processed.
NET = None                  # caffe.Net instance
IMAGE_DIM = None            # canonical (square) input image size
CROPPED_DIM = None          # crop size, read from the network's input blob width
IMAGE_CENTER = None         # offset of the centered crop inside the canonical image
IMAGE_MEAN = None           # data set mean image loaded from the .npy file
CROPPED_IMAGE_MEAN = None   # center crop of IMAGE_MEAN
BATCH_SIZE = None           # batch size, read from the network's input blob
NUM_OUTPUT = None           # number of output classes, read from the last blob
# Supported window-proposal / cropping strategies (see assemble_batches).
CROP_MODES = ['list', 'center_only', 'corners', 'selective_search']
def load_image(filename):
    """
    Load an image file as an (H x W x 3) ndarray of type uint8.

    Grayscale images are replicated across three channels; an alpha
    channel, if present, is dropped.
    """
    pixels = skimage.io.imread(filename)
    if pixels.ndim == 2:
        # Grayscale: stack the single channel three times.
        pixels = np.dstack([pixels] * 3)
    elif pixels.shape[2] == 4:
        # RGBA: keep only the color channels.
        pixels = pixels[..., :3]
    return pixels
def format_image(image, window=None, cropped_size=False):
    """
    Prepare one image (or one window of it) for the network.

    Input:
        image: (H x W x 3) ndarray
        window: (4) ndarray
            (ymin, xmin, ymax, xmax) coordinates, 0-indexed; crop when given.
        cropped_size: bool
            Resize to CROPPED_DIM when True, IMAGE_DIM otherwise.

    Output:
        image: (3 x H x W) ndarray -- BGR, resized, mean-subtracted.
        dims: (H, W) of the original image
    """
    dims = image.shape[:2]
    # Crop a subimage if window is provided.
    if window is not None:
        ymin, xmin, ymax, xmax = window
        image = image[ymin:ymax, xmin:xmax]
    # Reverse the channel axis: RGB -> BGR.
    image = image[:, :, ::-1]
    # Pick the target size and the matching mean image.
    if cropped_size:
        target, mean = CROPPED_DIM, CROPPED_IMAGE_MEAN
    else:
        target, mean = IMAGE_DIM, IMAGE_MEAN
    image = skimage.transform.resize(image, (target, target)) * 255
    image -= mean
    # Reorder (H, W, C) -> (C, H, W).
    image = image.transpose(2, 0, 1)
    return image, dims
def _image_coordinates(dims, window):
    """
    Calculate the original image coordinates of a
    window in the canonical (IMAGE_DIM x IMAGE_DIM) coordinates

    Input:
        dims: (H, W) of the original image
        window: (ymin, xmin, ymax, xmax) in the (IMAGE_DIM x IMAGE_DIM) frame

    Output:
        image_window: (ymin, xmin, ymax, xmax) in the original image frame
    """
    h, w = dims
    max_dim = float(IMAGE_DIM)
    h_scale, w_scale = h / max_dim, w / max_dim
    # NOTE(review): mapping canonical -> original would normally scale every
    # coordinate by (h_scale, w_scale, h_scale, w_scale); the reciprocal
    # factors applied to ymin/xmin here look inconsistent with the factors
    # applied to ymax/xmax -- confirm the intended transform with callers.
    image_window = window * np.array((1. / h_scale, 1. / w_scale,
                                      h_scale, w_scale))
    # Snap to integer pixel coordinates.
    return image_window.round().astype(int)
def _assemble_images_list(input_df):
    """
    For each image, collect the crops for the given windows.

    Input:
        input_df: pandas.DataFrame
            with 'filename', 'ymin', 'xmin', 'ymax', 'xmax' columns

    Output:
        images_df: pandas.DataFrame
            with 'image', 'window', 'filename' columns
    """
    coords = ['ymin', 'xmin', 'ymax', 'xmax']
    records = []
    # Each unique index value is one image file; gather its window rows.
    for fname in input_df.index.unique():
        windows = input_df.iloc[np.where(input_df.index == fname)][coords].values
        image = load_image(fname)
        for window in windows:
            crop, _ = format_image(image, window, cropped_size=True)
            records.append({
                'image': crop[np.newaxis, :],
                'window': window,
                'filename': fname
            })
    return pd.DataFrame(records)
def _assemble_images_center_only(image_fnames):
    """
    For each image, square the image and crop its center.

    Input:
        image_fnames: list

    Output:
        images_df: pandas.DataFrame
            With 'image', 'window', 'filename' columns.
    """
    crop_start = IMAGE_CENTER
    crop_end = IMAGE_CENTER + CROPPED_DIM
    crop_window = np.array((crop_start, crop_start, crop_end, crop_end))
    records = []
    for fname in image_fnames:
        image, dims = format_image(load_image(fname))
        # Slice the centered CROPPED_DIM square out of the canonical image.
        center_crop = image[np.newaxis, :,
                            crop_start:crop_end,
                            crop_start:crop_end]
        records.append({
            'image': center_crop,
            'window': _image_coordinates(dims, crop_window),
            'filename': fname
        })
    return pd.DataFrame(records)
def _assemble_images_corners(image_fnames):
    """
    For each image, square the image and crop its center, four corners,
    and mirrored version of the above.

    Input:
        image_fnames: list

    Output:
        images_df: pandas.DataFrame
            With 'image', 'window', 'filename' columns (10 rows per image:
            4 corners + center, plus their horizontal mirrors).
    """
    # make crops: rows 0-3 are the four corners, row 4 is the center.
    indices = [0, IMAGE_DIM - CROPPED_DIM]
    crops = np.empty((5, 4), dtype=int)
    curr = 0
    for i in indices:
        for j in indices:
            crops[curr] = (i, j, i + CROPPED_DIM, j + CROPPED_DIM)
            curr += 1
    crops[4] = (IMAGE_CENTER, IMAGE_CENTER,
                IMAGE_CENTER + CROPPED_DIM, IMAGE_CENTER + CROPPED_DIM)
    # Duplicate the 5 windows so rows 5-9 describe the mirrored crops.
    all_crops = np.tile(crops, (2, 1))
    data = []
    for image_fname in image_fnames:
        image, dims = format_image(load_image(image_fname))
        image_crops = np.empty((10, 3, CROPPED_DIM, CROPPED_DIM), dtype=np.float32)
        curr = 0
        for crop in crops:
            image_crops[curr] = image[:, crop[0]:crop[2], crop[1]:crop[3]]
            curr += 1
        image_crops[5:] = image_crops[:5, :, :, ::-1]  # flip for mirrors
        for i in range(len(all_crops)):
            data.append({
                'image': image_crops[i][np.newaxis, :],
                'window': _image_coordinates(dims, all_crops[i]),
                'filename': image_fname
            })
    images_df = pd.DataFrame(data)
    return images_df
def _assemble_images_selective_search(image_fnames):
    """
    Run Selective Search window proposals on all images, then for each
    image-window pair, extract a square crop.

    Input:
        image_fnames: list

    Output:
        images_df: pandas.DataFrame
            With 'image', 'window', 'filename' columns.
    """
    windows_list = selective_search.get_windows(image_fnames)
    records = []
    # windows_list is parallel to image_fnames: one proposal set per file.
    for fname, windows in zip(image_fnames, windows_list):
        image = load_image(fname)
        for window in windows:
            crop, _ = format_image(image, window, cropped_size=True)
            records.append({
                'image': crop[np.newaxis, :],
                'window': window,
                'filename': fname
            })
    return pd.DataFrame(records)
def assemble_batches(inputs, crop_mode='center_only'):
    """
    Assemble DataFrame of image crops for feature computation.

    Input:
        inputs: list of filenames (center_only, corners, and selective_search mode)
            OR input DataFrame (list mode)
        crop_mode: string
            'list': take the image windows from the input as-is
            'center_only': take the CROPPED_DIM middle of the image windows
            'corners': take CROPPED_DIM-sized boxes at 4 corners and center of
                the image windows, as well as their flipped versions: a total of 10.
            'selective_search': run Selective Search region proposal on the
                image windows, and take each enclosing subwindow.

    Output:
        df_batches: list of DataFrames, each one of BATCH_SIZE rows.
            Each row has 'image', 'filename', and 'window' info.
            Column 'image' contains (X x 3 x CROPPED_DIM x CROPPED_IM) ndarrays.
            Column 'filename' contains source filenames.
            Column 'window' contains [ymin, xmin, ymax, xmax] ndarrays.
            If 'filename' is None, then the row is just for padding.

    Note: for increased efficiency, increase the batch size (to the limit of gpu
    memory) to avoid the communication cost
    """
    if crop_mode == 'list':
        images_df = _assemble_images_list(inputs)
    elif crop_mode == 'center_only':
        images_df = _assemble_images_center_only(inputs)
    elif crop_mode == 'corners':
        images_df = _assemble_images_corners(inputs)
    elif crop_mode == 'selective_search':
        images_df = _assemble_images_selective_search(inputs)
    else:
        raise Exception("Unknown mode: not in {}".format(CROP_MODES))
    # Make sure the DataFrame has a multiple of BATCH_SIZE rows:
    # just fill the extra rows with NaN filenames and all-zero images.
    N = images_df.shape[0]
    remainder = N % BATCH_SIZE
    if remainder > 0:
        zero_image = np.zeros_like(images_df['image'].iloc[0])
        zero_window = np.zeros((1, 4), dtype=int)
        remainder_df = pd.DataFrame([{
            'filename': None,
            'image': zero_image,
            'window': zero_window
        }] * (BATCH_SIZE - remainder))
        # pd.concat replaces the long-deprecated DataFrame.append (removed in
        # pandas 2.0); semantics are identical here.
        images_df = pd.concat([images_df, remainder_df])
        N = images_df.shape[0]
    # Split into batches of BATCH_SIZE.
    # BUG FIX: use floor division. Under Python 3 (true division), `/` yields
    # floats here, breaking both the batch-index mask and range(); `//` is
    # identical for Python 2 ints and correct on Python 3.
    ind = np.arange(N) // BATCH_SIZE
    df_batches = [images_df[ind == i] for i in range(N // BATCH_SIZE)]
    return df_batches
def compute_feats(images_df):
    """Run the network forward on one batch and attach the features.

    Replaces the 'image' column of images_df with a 'feat' column holding
    one flattened output vector per row.
    """
    batch = np.ascontiguousarray(
        np.concatenate(images_df['image'].values), dtype='float32')
    output_blobs = [np.empty((BATCH_SIZE, NUM_OUTPUT, 1, 1), dtype=np.float32)]
    NET.Forward([batch], output_blobs)
    outputs = output_blobs[0]
    feats = [outputs[i].flatten() for i in range(len(outputs))]
    # Add the features and delete the images to free memory.
    del images_df['image']
    images_df['feat'] = feats
    return images_df
def config(model_def, pretrained_model, gpu, image_dim, image_mean_file):
    """
    Load the Caffe network and derive the module-level configuration globals
    (crop sizes, mean images, batch size, output count) from it.

    Input:
        model_def: path to the prototxt model definition
        pretrained_model: path to the trained weights file
        gpu: bool, run the network on the GPU when True
        image_dim: canonical (square) image dimension
        image_mean_file: path to the data set mean .npy file
    """
    global IMAGE_DIM, CROPPED_DIM, IMAGE_CENTER, IMAGE_MEAN, CROPPED_IMAGE_MEAN
    global NET, BATCH_SIZE, NUM_OUTPUT
    # Initialize network by loading model definition and weights.
    t = time.time()
    print("Loading Caffe model.")
    NET = caffe.Net(model_def, pretrained_model)
    NET.set_phase_test()
    if gpu:
        NET.set_mode_gpu()
    print("Caffe model loaded in {:.3f} s".format(time.time() - t))
    # Configure for input/output data.
    # BUG FIX: on Python 3, dict.values() is a non-indexable view, so
    # NET.blobs.values()[0] raises TypeError; materialize the view once.
    blobs = list(NET.blobs.values())
    IMAGE_DIM = image_dim
    CROPPED_DIM = blobs[0].width
    IMAGE_CENTER = int((IMAGE_DIM - CROPPED_DIM) / 2)
    # Load the data set mean file and pre-compute its center crop.
    IMAGE_MEAN = np.load(image_mean_file)
    CROPPED_IMAGE_MEAN = IMAGE_MEAN[IMAGE_CENTER:IMAGE_CENTER + CROPPED_DIM,
                                    IMAGE_CENTER:IMAGE_CENTER + CROPPED_DIM,
                                    :]
    BATCH_SIZE = blobs[0].num  # network batch size
    NUM_OUTPUT = blobs[-1].channels  # number of output classes
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required arguments: input and output.
    parser.add_argument(
        "input_file",
        help="Input txt/csv filename. If .txt, must be list of filenames.\
        If .csv, must be comma-separated file with header\
        'filename, xmin, ymin, xmax, ymax'"
    )
    parser.add_argument(
        "output_file",
        help="Output h5/csv filename. Format depends on extension."
    )
    # Optional arguments.
    parser.add_argument(
        "--model_def",
        default="../../../examples/imagenet/imagenet_deploy.prototxt",
        help="Model definition file."
    )
    parser.add_argument(
        "--pretrained_model",
        default="../../../examples/imagenet/caffe_reference_imagenet_model",
        help="Trained model weights file."
    )
    # NOTE(review): any non-empty string passed to --gpu is truthy; an
    # action='store_true' flag would be cleaner but changes the CLI.
    parser.add_argument(
        "--gpu",
        default=False,
        help="Switch for gpu computation."
    )
    parser.add_argument(
        "--crop_mode",
        default="center_only",
        choices=CROP_MODES,
        help="Image crop mode"
    )
    parser.add_argument(
        "--images_dim",
        # BUG FIX: without type=int a user-supplied value arrives as str and
        # breaks the integer arithmetic on IMAGE_DIM in config().
        type=int,
        default=256,
        help="Canonical dimension of (square) images."
    )
    parser.add_argument(
        "--images_mean_file",
        default=os.path.join(
            os.path.dirname(__file__), '../imagenet/ilsvrc_2012_mean.npy'),
        help="Data set image mean (numpy array).")
    args = parser.parse_args()
    # Configure network, input, output.
    config(args.model_def, args.pretrained_model, args.gpu, args.images_dim,
           args.images_mean_file)
    # Load input.
    t = time.time()
    print('Loading input and assembling batches...')
    if args.input_file.lower().endswith('txt'):
        with open(args.input_file) as f:
            inputs = [_.strip() for _ in f.readlines()]
    elif args.input_file.lower().endswith('csv'):
        inputs = pd.read_csv(args.input_file, sep=',', dtype={'filename': str})
        inputs.set_index('filename', inplace=True)
    else:
        # BUG FIX: corrected "Uknown" typo in the error message.
        raise Exception("Unknown input file type: not in txt or csv")
    # Assemble into batches
    image_batches = assemble_batches(inputs, args.crop_mode)
    print('{} batches assembled in {:.3f} s'.format(len(image_batches),
                                                    time.time() - t))
    # Process the batches.
    t = time.time()
    # BUG FIX: replaced the Python-2-only `print` statement with a call,
    # matching the print() style used everywhere else in this script.
    print('Processing {} files in {} batches'.format(len(inputs),
                                                     len(image_batches)))
    dfs_with_feats = []
    for i in range(len(image_batches)):
        if i % 10 == 0:
            print('...on batch {}/{}, elapsed time: {:.3f} s'.format(
                i, len(image_batches), time.time() - t))
        dfs_with_feats.append(compute_feats(image_batches[i]))
    # Concatenate, dropping the padding rows.
    df = pd.concat(dfs_with_feats).dropna(subset=['filename'])
    df.set_index('filename', inplace=True)
    print("Processing complete after {:.3f} s.".format(time.time() - t))
    # Label coordinates
    coord_cols = ['ymin', 'xmin', 'ymax', 'xmax']
    df[coord_cols] = pd.DataFrame(
        data=np.vstack(df['window']), index=df.index, columns=coord_cols)
    del df['window']
    # Write out the results.
    t = time.time()
    if args.output_file.lower().endswith('csv'):
        # enumerate the class probabilities
        class_cols = ['class{}'.format(x) for x in range(NUM_OUTPUT)]
        df[class_cols] = pd.DataFrame(
            data=np.vstack(df['feat']), index=df.index, columns=class_cols)
        # NOTE(review): `cols=` is the pre-0.17 pandas keyword; on modern
        # pandas this must be `columns=` -- confirm the pinned pandas version.
        df.to_csv(args.output_file, cols=coord_cols + class_cols)
    else:
        df.to_hdf(args.output_file, 'df', mode='w')
    print("Done. Saving to {} took {:.3f} s.".format(
        args.output_file, time.time() - t))
    sys.exit()
|
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Validate VO Services."""
from __future__ import absolute_import, division, print_function, unicode_literals
from ...extern import six
# STDLIB
import multiprocessing
import os
import warnings
# LOCAL
from .exceptions import ValidationMultiprocessingError, InvalidValidationAttribute
from ..client import vos_catalog
from ..client.exceptions import VOSError
from ...config.configuration import ConfigAlias
from ...io import votable
from ...io.votable.exceptions import E19
from ...io.votable.validator import html, result
from ...logger import log
from ...utils import OrderedDict # For 2.6 compatibility
from ...utils import data
from ...utils.exceptions import AstropyUserWarning
from ...utils.timer import timefunc
from ...utils.xml.unescaper import unescape_all
# Temporary solution until STScI VAO registry formally provides
# <testQuery> tags
from .tstquery import parse_cs
__all__ = ['check_conesearch_sites']
# Backward-compatibility aliases: these configuration items moved from
# astropy.vo.validator.validate to astropy.vo.validator in astropy 0.4.
CS_MSTR_LIST = ConfigAlias(
    '0.4', 'CS_MSTR_LIST', 'conesearch_master_list',
    'astropy.vo.validator.validate', 'astropy.vo.validator')
CS_URLS = ConfigAlias(
    '0.4', 'CS_URLS', 'conesearch_urls',
    'astropy.vo.validator.validate', 'astropy.vo.validator')
NONCRIT_WARNINGS = ConfigAlias(
    '0.4', 'NONCRIT_WARNINGS', 'noncritical_warnings',
    'astropy.vo.validator.validate', 'astropy.vo.validator')
@timefunc(1)
def check_conesearch_sites(destdir=os.curdir, verbose=True, parallel=True,
                           url_list='default'):
    """Validate Cone Search Services.

    .. note::

        URLs are unescaped prior to validation.

        Only check queries with ``<testQuery>`` parameters.
        Does not perform meta-data and erroneous queries.

    Parameters
    ----------
    destdir : str, optional
        Directory to store output files. Will be created if does
        not exist. Existing files with these names will be deleted
        or replaced:

            * conesearch_good.json
            * conesearch_warn.json
            * conesearch_exception.json
            * conesearch_error.json

    verbose : bool, optional
        Print extra info to log.

    parallel : bool, optional
        Enable multiprocessing.

    url_list : list of string, optional
        Only check these access URLs against
        `astropy.vo.validator.Conf.conesearch_master_list` and ignore
        the others, which will not appear in output files. By
        default, check those in
        `astropy.vo.validator.Conf.conesearch_urls`. If `None`, check
        everything.

    Raises
    ------
    IOError
        Invalid destination directory.

    timeout
        URL request timed out.

    ValidationMultiprocessingError
        Multiprocessing failed.

    """
    from . import conf
    if url_list == 'default':
        url_list = conf.conesearch_urls
    # Reject non-string, empty, or non-directory destinations up front.
    if (not isinstance(destdir, six.string_types) or len(destdir) == 0 or
            os.path.exists(destdir) and not os.path.isdir(destdir)):
        raise IOError('Invalid destination directory')  # pragma: no cover
    if not os.path.exists(destdir):
        os.mkdir(destdir)
    # Output dir created by votable.validator
    out_dir = os.path.join(destdir, 'results')
    if not os.path.exists(out_dir):
        os.mkdir(out_dir)
    # Output files, one JSON database per result category.
    db_file = OrderedDict()
    db_file['good'] = os.path.join(destdir, 'conesearch_good.json')
    db_file['warn'] = os.path.join(destdir, 'conesearch_warn.json')
    db_file['excp'] = os.path.join(destdir, 'conesearch_exception.json')
    db_file['nerr'] = os.path.join(destdir, 'conesearch_error.json')
    # JSON dictionaries for output files
    js_tree = {}
    for key in db_file:
        js_tree[key] = vos_catalog.VOSDatabase.create_empty()
        # Delete existing files, if any, to be on the safe side.
        # Else can cause confusion if program exited prior to
        # new files being written but old files are still there.
        if os.path.exists(db_file[key]):  # pragma: no cover
            os.remove(db_file[key])
            if verbose:
                log.info('Existing file {0} deleted'.format(db_file[key]))
    # Master VO database from registry. Silence all the warnings.
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        js_mstr = vos_catalog.VOSDatabase.from_registry(
            CS_MSTR_LIST(), encoding='binary', show_progress=verbose)
    # Validate only a subset of the services.
    if url_list is not None:
        # Make sure URL is unique and fixed (unescaped, bytes-encoded).
        url_list = set(six.moves.map(unescape_all, [cur_url.encode('utf-8') if isinstance(cur_url, str) else cur_url for cur_url in url_list]))
        uniq_rows = len(url_list)
        url_list_processed = []  # To track if given URL is valid in registry
        if verbose:
            log.info('Only {0}/{1} site(s) are validated'.format(
                uniq_rows, len(js_mstr)))
    # Validate all services.
    else:
        uniq_rows = len(js_mstr)
    key_lookup_by_url = {}
    # Process each catalog in the registry.
    for cur_key, cur_cat in js_mstr.get_catalogs():
        cur_url = cur_cat['url']
        # Skip if:
        #   a. not a Cone Search service
        #   b. not in given subset, if any
        if ((cur_cat['capabilityClass'] != b'ConeSearch') or
                (url_list is not None and cur_url not in url_list)):
            continue
        # Use testQuery to return non-empty VO table with max verbosity.
        testquery_pars = parse_cs(cur_cat['resourceID'])
        cs_pars_arr = ['='.join([key, testquery_pars[key]]).encode('utf-8')
                       for key in testquery_pars]
        cs_pars_arr += [b'VERB=3']
        # Track the service: the full query URL maps back to the catalog key.
        key_lookup_by_url[cur_url + b'&'.join(cs_pars_arr)] = cur_key
        if url_list is not None:
            url_list_processed.append(cur_url)
    # Give warning if any of the user given subset is not in the registry.
    if url_list is not None:
        url_list_skipped = url_list - set(url_list_processed)
        n_skipped = len(url_list_skipped)
        if n_skipped > 0:
            warn_str = '{0} not found in registry! Skipped:\n'.format(n_skipped)
            for cur_url in url_list_skipped:
                warn_str += '\t{0}\n'.format(cur_url)
            warnings.warn(warn_str, AstropyUserWarning)
    all_urls = list(key_lookup_by_url)
    timeout = data.conf.remote_timeout
    map_args = [(out_dir, url, timeout) for url in all_urls]
    # Validate URLs
    if parallel:
        pool = multiprocessing.Pool()
        try:
            mp_list = pool.map(_do_validation, map_args)
        except Exception as exc:  # pragma: no cover
            raise ValidationMultiprocessingError(
                'An exception occurred during parallel processing '
                'of validation results: {0}'.format(exc))
    else:
        # NOTE(review): under Python 3 map() is lazy, yet mp_list is consumed
        # both by the loop below and by result.get_result_subsets() -- confirm
        # this path is exercised only under Python 2 semantics (list result).
        mp_list = map(_do_validation, map_args)
    # Categorize validation results
    for r in mp_list:
        db_key = r['out_db_name']
        cat_key = key_lookup_by_url[r.url]
        cur_cat = js_mstr.get_catalog(cat_key)
        _copy_r_to_cat(r, cur_cat)
        js_tree[db_key].add_catalog(cat_key, cur_cat)
    # Write to HTML
    html_subsets = result.get_result_subsets(mp_list, out_dir)
    html.write_index(html_subsets, all_urls, out_dir)
    if parallel:
        html_subindex_args = [(out_dir, html_subset, uniq_rows)
                              for html_subset in html_subsets]
        pool.map(_html_subindex, html_subindex_args)
    else:
        for html_subset in html_subsets:
            _html_subindex((out_dir, html_subset, uniq_rows))
    # Write to JSON
    n = {}
    n_tot = 0
    for key in db_file:
        n[key] = len(js_tree[key])
        n_tot += n[key]
        js_tree[key].to_json(db_file[key], clobber=True)
        if verbose:
            log.info('{0}: {1} catalog(s)'.format(key, n[key]))
    # Checksum
    if verbose:
        log.info('total: {0} out of {1} catalog(s)'.format(n_tot, uniq_rows))
    if n['good'] == 0:  # pragma: no cover
        warnings.warn(
            'No good sites available for Cone Search.', AstropyUserWarning)
def _do_validation(args):
    """Validation for multiprocessing support.

    Parameters
    ----------
    args : tuple
        ``(root, url, timeout)`` -- output directory, Cone Search access URL,
        and remote timeout in seconds, packed into one tuple so this function
        can be used directly with ``multiprocessing.Pool.map``.

    Returns
    -------
    r : ``result.Result``
        Validation result with success codes set (via ``_categorize_result``)
        and its HTML report already written out.
    """
    root, url, timeout = args
    # Reset global VO warning bookkeeping so counts from a previous
    # validation run do not leak into this one.
    votable.table.reset_vo_warnings()
    r = result.Result(url, root=root, timeout=timeout)
    r.validate_vo()
    _categorize_result(r)
    # This was already checked above.
    # Calling this again to get VOTableFile object to catch
    # well-formed error responses in downloaded XML.
    #
    # 'incorrect' is also added in case user wants to use
    # 'conesearch_warn.json' anyway.
    #
    # If using cached data, it will not detect network error
    # like the first run, but will raise exception.
    #
    # When SR is not 0, VOSError is raised for empty table.
    #
    if r['expected'] in ('good', 'incorrect') and r['nexceptions'] == 0:
        nexceptions = 0
        nwarnings = 0
        lines = []
        # Record parser warnings instead of emitting them, so they can be
        # folded into the result's warning/exception tallies below.
        with warnings.catch_warnings(record=True) as warning_lines:
            try:
                tab = vos_catalog.vo_tab_parse(votable.table.parse(
                    r.get_vo_xml_path(), pedantic=False), r.url, {})
            except (E19, IndexError, VOSError) as e:  # pragma: no cover
                lines.append(str(e))
                nexceptions += 1
        lines = [str(x.message) for x in warning_lines] + lines
        # Classify each captured line as VO warning and/or exception.
        warning_types = set()
        for line in lines:  # pragma: no cover
            w = votable.exceptions.parse_vowarning(line)
            if w['is_warning']:
                nwarnings += 1
            if w['is_exception']:
                nexceptions += 1
            warning_types.add(w['warning'])
        r['nwarnings'] += nwarnings
        r['nexceptions'] += nexceptions
        r['warnings'] += lines
        r['warning_types'] = r['warning_types'].union(warning_types)
        # Re-categorize now that the warning/exception counts changed.
        _categorize_result(r)
    html.write_result(r)
    return r
def _categorize_result(r):
    """Set success codes on a validation result.

    Parameters
    ----------
    r : `astropy.io.votable.validator.result.Result`

    Raises
    ------
    InvalidValidationAttribute
        Unhandled validation result attributes.
    """
    from . import conf

    # Decide the (output database name, expected quality) pair first,
    # then store both on the result in one place.
    if 'network_error' in r and r['network_error'] is not None:  # pragma: no cover
        db_name, expected = 'nerr', 'broken'
    elif ((r['nexceptions'] == 0 and r['nwarnings'] == 0) or
            r['warning_types'].issubset(conf.noncritical_warnings)):
        db_name, expected = 'good', 'good'
    elif r['nexceptions'] > 0:  # pragma: no cover
        db_name, expected = 'excp', 'incorrect'
    elif r['nwarnings'] > 0:  # pragma: no cover
        db_name, expected = 'warn', 'incorrect'
    else:  # pragma: no cover
        raise InvalidValidationAttribute(
            'Unhandled validation result attributes: {0}'.format(r._attributes))

    r['out_db_name'] = db_name
    r['expected'] = expected
def _html_subindex(args):
    """HTML writer for multiprocessing support.

    ``args`` is an ``(out_dir, subset, total)`` tuple packed for ``Pool.map``.
    """
    out_dir = args[0]
    subset = args[1]
    total = args[2]
    html.write_index_table(out_dir, *subset, total=total)
def _copy_r_to_cat(r, cat):
"""Copy validation result attributes to given VO catalog.
Parameters
----------
r : `astropy.io.votable.validate.result.Result`
cat : `astropy.vo.client.vos_catalog.VOSCatalog`
"""
for key in r._attributes:
new_key = 'validate_' + key
cat[new_key] = r._attributes[key]
|
|
"""
@package mi.instrument.um.thsph.thsph.driver
@file marine-integrations/mi/instrument/um/thsph/thsph/driver.py
@author Richard Han
@brief Driver for the thsph
Release notes:
Vent Chemistry Instrument Driver
"""
import re
import time
from mi.core.exceptions import InstrumentException
from mi.core.driver_scheduler import DriverSchedulerConfigKey, TriggerType
from mi.core.exceptions import SampleException, InstrumentProtocolException, InstrumentParameterException
from mi.core.instrument.driver_dict import DriverDictKey
from mi.core.instrument.protocol_param_dict import ParameterDictType, ParameterDictVisibility
from mi.core.log import get_logger, get_logging_metaclass
from mi.core.common import BaseEnum, Units
from mi.core.instrument.instrument_protocol import CommandResponseInstrumentProtocol
from mi.core.instrument.instrument_fsm import ThreadSafeFSM
from mi.core.instrument.instrument_driver import SingleConnectionInstrumentDriver, DriverConfigKey
from mi.core.instrument.instrument_driver import DriverEvent
from mi.core.instrument.instrument_driver import DriverAsyncEvent
from mi.core.instrument.instrument_driver import DriverProtocolState
from mi.core.instrument.instrument_driver import DriverParameter
from mi.core.instrument.data_particle import DataParticle
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.instrument.data_particle import CommonDataParticleType
from mi.core.instrument.chunker import StringChunker
__author__ = 'Richard Han'
__license__ = 'Apache 2.0'
log = get_logger()
###
# Driver Constant Definitions
###
# Line terminator appended to every command sent to the instrument.
NEWLINE = '\r\n'
# Default command timeout, in seconds.
TIMEOUT = 10
class ScheduledJob(BaseEnum):
    """Scheduler job identifiers used by this driver."""
    # Job that periodically fires SCHEDULE_ACQUIRE_SAMPLE while autosampling.
    AUTO_SAMPLE = 'auto_sample'
class DataParticleType(BaseEnum):
    """
    Data particle types produced by this driver.
    """
    RAW = CommonDataParticleType.RAW  # pass-through raw data stream
    THSPH_PARSED = 'thsph_sample'  # parsed ADC sample (see THSPHParticle)
class Command(BaseEnum):
    """
    Instrument command strings (logical names; the series-specific ASCII
    string is looked up in THSPHProtocol.THSPH_COMMANDS).
    """
    GET_SAMPLE = 'get_sample_cmd'  # Gets data sample from ADC
class ProtocolState(BaseEnum):
    """
    Instrument protocol states (mirrors the generic DriverProtocolState set
    supported by this driver's FSM).
    """
    UNKNOWN = DriverProtocolState.UNKNOWN
    COMMAND = DriverProtocolState.COMMAND
    AUTOSAMPLE = DriverProtocolState.AUTOSAMPLE
    DIRECT_ACCESS = DriverProtocolState.DIRECT_ACCESS
class ProtocolEvent(BaseEnum):
    """
    Protocol events handled by the state machine.
    """
    ENTER = DriverEvent.ENTER
    EXIT = DriverEvent.EXIT
    DISCOVER = DriverEvent.DISCOVER
    START_DIRECT = DriverEvent.START_DIRECT
    STOP_DIRECT = DriverEvent.STOP_DIRECT
    EXECUTE_DIRECT = DriverEvent.EXECUTE_DIRECT
    ACQUIRE_SAMPLE = DriverEvent.ACQUIRE_SAMPLE
    START_AUTOSAMPLE = DriverEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = DriverEvent.STOP_AUTOSAMPLE
    GET = DriverEvent.GET
    SET = DriverEvent.SET
    # Driver-internal event raised by the autosample scheduler job.
    SCHEDULE_ACQUIRE_SAMPLE = 'DRIVER_EVENT_SCHEDULE_ACQUIRE_SAMPLE'
class Capability(BaseEnum):
    """
    Protocol events that should be exposed to users (subset of above).
    """
    ACQUIRE_SAMPLE = ProtocolEvent.ACQUIRE_SAMPLE
    START_AUTOSAMPLE = ProtocolEvent.START_AUTOSAMPLE
    STOP_AUTOSAMPLE = ProtocolEvent.STOP_AUTOSAMPLE
    GET = ProtocolEvent.GET
    SET = ProtocolEvent.SET
    # Consistency fix: reference ProtocolEvent (like the members above)
    # instead of DriverEvent; the underlying values are identical because
    # ProtocolEvent simply aliases DriverEvent.
    START_DIRECT = ProtocolEvent.START_DIRECT
    STOP_DIRECT = ProtocolEvent.STOP_DIRECT
    DISCOVER = ProtocolEvent.DISCOVER
class Parameter(DriverParameter):
    """
    Device specific parameters for THSPH.
    """
    INTERVAL = 'SampleInterval'  # autosample polling interval, seconds (1-600)
    INSTRUMENT_SERIES = 'InstrumentSeries'  # instrument series: 'A', 'B' or 'C'
class Prompt(BaseEnum):
    """
    Device i/o prompts for THSPH.
    """
    # Presumably the reply to the 'aP*' communication-test command
    # (see THSPHProtocol._direct_commands) -- confirm with instrument docs.
    COMM_RESPONSE = 'aP#'
###############################################################################
# Data Particles
###############################################################################
class THSPHDataParticleKey(BaseEnum):
    """Value IDs for the fields of a parsed THSPH sample particle."""
    HIGH_IMPEDANCE_ELECTRODE_1 = "thsph_hie1"  # High Impedance Electrode 1 for pH
    HIGH_IMPEDANCE_ELECTRODE_2 = "thsph_hie2"  # High Impedance Electrode 2 for pH
    H2_ELECTRODE = "thsph_h2electrode"  # H2 electrode
    S2_ELECTRODE = "thsph_s2electrode"  # Sulfide Electrode
    THERMOCOUPLE1 = "thsph_thermocouple1"  # Type E thermocouple 1-high
    THERMOCOUPLE2 = "thsph_thermocouple2"  # Type E thermocouple 2-low
    REFERENCE_THERMISTOR = "thsph_rthermistor"  # Reference Thermistor
    BOARD_THERMISTOR = "thsph_bthermistor"  # Board Thermistor
class THSPHParticle(DataParticle):
    """
    Routines for parsing raw data into a data particle structure. Override
    the building of values, and the rest should come along for free.

    The data signal is a concatenation of 8 channels of 14-bit resolution data.
    Each channel is output as a four ASCII character hexadecimal number
    (0000 to 3FFF) and is converted here to a raw decimal number.

    Sample:
       aH200A200720DE20AA10883FFF2211225E#
    Format:
       aHaaaabbbbccccddddeeeeffffgggghhhh#
       aaaa = Chanel 1 High Input Impedance Electrode;
       bbbb = Chanel 2 High Input Impedance Electrode;
       cccc = H2 Electrode;
       dddd = S2 Electrode;
       eeee = TYPE E Thermocouple 1;
       ffff = TYPE E Thermocouple 2;
       gggg = Thermistor;
       hhhh = Board 2 Thermistor;
    """
    _data_particle_type = DataParticleType.THSPH_PARSED

    # Particle keys in the order the channels appear in the sample string.
    _CHANNEL_KEYS = (
        THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_1,
        THSPHDataParticleKey.HIGH_IMPEDANCE_ELECTRODE_2,
        THSPHDataParticleKey.H2_ELECTRODE,
        THSPHDataParticleKey.S2_ELECTRODE,
        THSPHDataParticleKey.THERMOCOUPLE1,
        THSPHDataParticleKey.THERMOCOUPLE2,
        THSPHDataParticleKey.REFERENCE_THERMISTOR,
        THSPHDataParticleKey.BOARD_THERMISTOR,
    )

    @staticmethod
    def regex():
        """
        Regular expression to match a sample pattern.
        @return: regex string
        """
        # 'aH' prefix, eight captured 4-digit hex channels, '#' terminator.
        channel = r'([0-9A-F]{4})'
        return r'aH' + channel * 8 + r'#'

    @staticmethod
    def regex_compiled():
        """
        get the compiled regex pattern
        @return: compiled re
        """
        return re.compile(THSPHParticle.regex())

    def _build_parsed_values(self):
        """
        Take something in the ADC data format and split it into the eight
        channel values (two high-impedance electrodes, H2 and S2 electrodes,
        two Type E thermocouples, reference and board thermistors).
        @throws SampleException If there is a problem with sample creation
        """
        match = THSPHParticle.regex_compiled().match(self.raw_data)
        if not match:
            raise SampleException("No regex match of THSPH parsed sample data: [%s]" %
                                  self.raw_data)
        try:
            channel_values = [self.hex2value(match.group(index))
                              for index in range(1, 9)]
        except ValueError:
            raise SampleException("ValueError while converting data: [%s]" %
                                  self.raw_data)
        return [{DataParticleKey.VALUE_ID: key, DataParticleKey.VALUE: value}
                for key, value in zip(self._CHANNEL_KEYS, channel_values)]

    def hex2value(self, hex_value):
        """
        Convert a ADC hex value to an int value.
        @param hex_value: string to convert
        @return: int of the converted value
        """
        if not isinstance(hex_value, basestring):
            raise InstrumentParameterException("hex value not a string")
        return int(hex_value, 16)
###############################################################################
# Driver
###############################################################################
class InstrumentDriver(SingleConnectionInstrumentDriver):
    """
    InstrumentDriver subclass
    Subclasses SingleConnectionInstrumentDriver with connection state
    machine.
    """
    ########################################################################
    # Protocol builder.
    ########################################################################
    def _build_protocol(self):
        """
        Construct the driver protocol state machine.
        Instantiates THSPHProtocol and stores it on self._protocol.
        """
        self._protocol = THSPHProtocol(Prompt, NEWLINE, self._driver_event)
###########################################################################
# Protocol
###########################################################################
class THSPHProtocol(CommandResponseInstrumentProtocol):
    """
    Instrument protocol class for the THSPH vent chemistry instrument.
    Subclasses CommandResponseInstrumentProtocol.

    The instrument is polled: the driver sends a short ASCII command
    (series-specific, see THSPH_COMMANDS) and the instrument replies with
    one ADC sample string that THSPHParticle parses.
    """
    SERIES_A = 'A'
    SERIES_B = 'B'
    SERIES_C = 'C'
    GET_SAMPLE_SERIES_A = 'aH*'  # Gets data sample from ADC for series A
    GET_SAMPLE_SERIES_B = 'bH*'  # Gets data sample from ADC for series B
    GET_SAMPLE_SERIES_C = 'cH*'  # Gets data sample from ADC for series C

    # THSPH commands for instrument series A, B and C
    THSPH_COMMANDS = {
        SERIES_A: {Command.GET_SAMPLE: GET_SAMPLE_SERIES_A},
        SERIES_B: {Command.GET_SAMPLE: GET_SAMPLE_SERIES_B},
        SERIES_C: {Command.GET_SAMPLE: GET_SAMPLE_SERIES_C},
    }

    __metaclass__ = get_logging_metaclass(log_level='debug')

    def __init__(self, prompts, newline, driver_event):
        """
        Protocol constructor.
        @param prompts A BaseEnum class containing instrument prompts.
        @param newline The newline.
        @param driver_event Driver process event callback.
        """
        # Construct protocol superclass.
        CommandResponseInstrumentProtocol.__init__(self, prompts, newline, driver_event)

        # Build protocol state machine.
        self._protocol_fsm = ThreadSafeFSM(ProtocolState, ProtocolEvent,
                                           ProtocolEvent.ENTER, ProtocolEvent.EXIT)

        # Add event handlers for protocol state machine.
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.ENTER, self._handler_unknown_enter)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.EXIT, self._handler_unknown_exit)
        self._protocol_fsm.add_handler(ProtocolState.UNKNOWN, ProtocolEvent.DISCOVER, self._handler_unknown_discover)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ENTER, self._handler_command_enter)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.EXIT, self._handler_command_exit)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_AUTOSAMPLE,
                                       self._handler_command_start_autosample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.ACQUIRE_SAMPLE,
                                       self._handler_command_acquire_sample)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.GET, self._handler_command_get)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.SET, self._handler_command_set)
        self._protocol_fsm.add_handler(ProtocolState.COMMAND, ProtocolEvent.START_DIRECT,
                                       self._handler_command_start_direct)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.ENTER, self._handler_autosample_enter)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.EXIT, self._handler_autosample_exit)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.SCHEDULE_ACQUIRE_SAMPLE,
                                       self._handler_command_acquire_sample)
        self._protocol_fsm.add_handler(ProtocolState.AUTOSAMPLE, ProtocolEvent.STOP_AUTOSAMPLE,
                                       self._handler_autosample_stop_autosample)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.ENTER,
                                       self._handler_direct_access_enter)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXIT,
                                       self._handler_direct_access_exit)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.EXECUTE_DIRECT,
                                       self._handler_direct_access_execute_direct)
        self._protocol_fsm.add_handler(ProtocolState.DIRECT_ACCESS, ProtocolEvent.STOP_DIRECT,
                                       self._handler_direct_access_stop_direct)

        # Construct the parameter dictionary containing device parameters,
        # current parameter values, and set formatting functions.
        self._build_driver_dict()
        self._build_command_dict()
        self._build_param_dict()

        # Add build handlers for device commands.
        self._add_build_handler(Command.GET_SAMPLE, self._build_simple_command)

        # Start the state machine in the UNKNOWN state.
        self._protocol_fsm.start(ProtocolState.UNKNOWN)

        # commands sent to device to be filtered in responses for telnet DA
        self._sent_cmds = []

        self._chunker = StringChunker(THSPHProtocol.sieve_function)

        # Set Get Sample Command and Communication Test Command for Series A as default
        self._get_sample_cmd = self.GET_SAMPLE_SERIES_A

        self._direct_commands['Newline'] = self._newline
        self._direct_commands['Test A'] = 'aP*' + self._newline
        self._direct_commands['Test B'] = 'bP*' + self._newline
        self._direct_commands['Test C'] = 'cP*' + self._newline
        self._direct_commands['Sample A'] = self.GET_SAMPLE_SERIES_A + self._newline
        self._direct_commands['Sample B'] = self.GET_SAMPLE_SERIES_B + self._newline
        self._direct_commands['Sample C'] = self.GET_SAMPLE_SERIES_C + self._newline

    @staticmethod
    def sieve_function(raw_data):
        """
        The method that splits samples.
        @param raw_data: accumulated raw byte stream from the instrument
        @return: list of (start, end) index pairs of matched samples
        """
        matchers = []
        return_list = []
        matchers.append(THSPHParticle.regex_compiled())
        for matcher in matchers:
            log.trace('matcher: %r raw_data: %r', matcher.pattern, raw_data)
            for match in matcher.finditer(raw_data):
                return_list.append((match.start(), match.end()))
        return return_list

    def _got_chunk(self, chunk, timestamp):
        """
        The base class got_data has gotten a chunk from the chunker. Pass it to extract_sample
        with the appropriate particle objects and REGEXes.
        """
        if not self._extract_sample(THSPHParticle, THSPHParticle.regex_compiled(), chunk, timestamp):
            raise InstrumentProtocolException("Unhandled chunk")

    def _build_driver_dict(self):
        """
        Populate the driver dictionary with options
        """
        self._driver_dict.add(DriverDictKey.VENDOR_SW_COMPATIBLE, True)

    def _build_command_dict(self):
        """
        Populate the command dictionary with command.
        """
        self._cmd_dict.add(Capability.START_AUTOSAMPLE, display_name="Start Autosample")
        self._cmd_dict.add(Capability.STOP_AUTOSAMPLE, display_name="Stop Autosample")
        self._cmd_dict.add(Capability.ACQUIRE_SAMPLE, display_name="Acquire Sample")
        self._cmd_dict.add(Capability.DISCOVER, display_name='Discover')

    def _build_param_dict(self):
        """
        Populate the parameter dictionary with THSPH parameters.
        For each parameter key, add match string, match lambda function,
        and value formatting function for set commands.
        """
        # Add parameter handlers to parameter dict.
        self._param_dict.add(Parameter.INTERVAL,
                             r'Auto Polled Interval = (\d+)',
                             lambda match: int(match.group(1)),
                             str,
                             type=ParameterDictType.INT,
                             units=Units.SECOND,
                             display_name="Polled Interval",
                             range=(1, 600),
                             description="Polling interval, internal to driver (1-600).",
                             visibility=ParameterDictVisibility.READ_WRITE,
                             startup_param=True,
                             direct_access=False,
                             default_value=5)
        self._param_dict.add(Parameter.INSTRUMENT_SERIES,
                             r'Instrument Series = ([A-C])',
                             # Bug fix: the series is a letter 'A'-'C'; the previous
                             # int(match.group(1)) would raise ValueError if matched.
                             lambda match: match.group(1),
                             str,
                             type=ParameterDictType.STRING,
                             display_name="Instrument Series",
                             range={'A': 'A', 'B': 'B', 'C': 'C'},
                             description='Defines instance of instrument series [A, B, C].',
                             visibility=ParameterDictVisibility.IMMUTABLE,
                             startup_param=True,
                             direct_access=False,
                             default_value='A')

    def _filter_capabilities(self, events):
        """
        Return a list of currently available capabilities.
        """
        return [x for x in events if Capability.has(x)]

    ########################################################################
    # Unknown State handlers.
    ########################################################################
    def _handler_unknown_enter(self, *args, **kwargs):
        """
        Enter unknown state.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_unknown_exit(self, *args, **kwargs):
        """
        Exit unknown state.
        """
        pass

    def _handler_unknown_discover(self, *args, **kwargs):
        """
        Discover current state; Change next state to be COMMAND state.
        """
        next_state = ProtocolState.COMMAND
        result = []
        return next_state, (next_state, result)

    ########################################################################
    # Command State handlers.
    ########################################################################
    def _handler_command_acquire_sample(self, *args, **kwargs):
        """
        Poll the instrument for a single sample and wait for the resulting
        particle to be published.
        """
        timeout = time.time() + TIMEOUT
        next_state = None
        self._do_cmd_no_resp(Command.GET_SAMPLE, timeout=TIMEOUT)
        particles = self.wait_for_particles([DataParticleType.THSPH_PARSED], timeout)
        return next_state, (next_state, particles)

    def _handler_command_enter(self, *args, **kwargs):
        """
        Enter command state: apply startup parameters and announce the change.
        """
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._init_params()
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)

    def _handler_command_exit(self, *args, **kwargs):
        """
        Exit command state.
        """
        pass

    def _handler_command_get(self, *args, **kwargs):
        """
        Get device parameters from the parameter dict. First we set a baseline timestamp
        that all data expirations will be calculated against. Then we try to get parameter
        value. If we catch an expired parameter then we will update all parameters and get
        values using the original baseline time that we set at the beginning of this method.
        Assuming our _update_params is updating all parameter values properly then we can
        ensure that all data will be fresh. Nobody likes stale data!
        @param args[0] list of parameters to retrieve, or DriverParameter.ALL.
        """
        next_state, result = self._handler_get(*args, **kwargs)
        # TODO - update return signature to match other handlers - next_state, (next_state, result)
        return next_state, result

    def _handler_command_set(self, *args, **kwargs):
        """
        Perform a set command.
        @param args[0] parameter : value dict.
        @retval (next_state, result) tuple, (None, None).
        @throws InstrumentParameterException if missing set parameters, if set parameters not ALL and
        not a dict, or if parameter can't be properly formatted.
        """
        next_state = None
        result = []
        startup = False

        # Retrieve required parameter.
        # Raise if no parameter provided, or not a dict.
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')
        if not isinstance(params, dict):
            raise InstrumentParameterException('Set parameters not a dict.')
        try:
            startup = args[1]
        except IndexError:
            pass

        old_config = self._param_dict.get_config()
        self._set_params(params, startup)
        new_config = self._param_dict.get_config()
        # Only announce a config change if a value actually changed.
        if old_config != new_config:
            self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)
        return next_state, result

    def _set_params(self, *args, **kwargs):
        """
        Set various parameters internally to the driver. No issuing commands to the
        instrument needed for this driver.
        @throws InstrumentParameterException on missing/invalid parameters.
        """
        try:
            params = args[0]
        except IndexError:
            raise InstrumentParameterException('Set command requires a parameter dict.')
        # list can be null, like in the case of direct access params, in this case do nothing
        if not params:
            return

        # Do a range check before we start all sets
        for (key, val) in params.iteritems():
            if key == Parameter.INTERVAL and not (0 < val < 601):
                log.debug("Auto Sample Interval not in 1 to 600 range ")
                raise InstrumentParameterException("sample interval out of range [1, 600]")
            if key == Parameter.INSTRUMENT_SERIES:
                # Bug fix: membership must be tested against the individual
                # letters; "val not in 'ABC'" is a substring test and would
                # wrongly accept values like 'AB' or '' (then KeyError below).
                if val not in (self.SERIES_A, self.SERIES_B, self.SERIES_C):
                    log.debug("Instrument Series is not A, B or C ")
                    raise InstrumentParameterException("Instrument Series is not valid")
                else:
                    self._get_sample_cmd = self.THSPH_COMMANDS[val][Command.GET_SAMPLE]
            log.debug('key = (%s), value = (%s)' % (key, val))
            self._param_dict.set_value(key, val)

    def _handler_command_start_autosample(self, *args, **kwargs):
        """
        Switch into autosample mode.
        @retval next_state, (next_state, result)
        @throws InstrumentTimeoutException if device cannot be woken for command.
        @throws InstrumentProtocolException if command could not be built or misunderstood.
        """
        next_state = ProtocolState.AUTOSAMPLE
        result = []
        return next_state, (next_state, result)

    def _handler_command_start_direct(self):
        """
        Start direct access
        """
        next_state = ProtocolState.DIRECT_ACCESS
        result = []
        return next_state, (next_state, result)

    #######################################################################
    # Autosample State handlers.
    ########################################################################
    def _handler_autosample_enter(self, *args, **kwargs):
        """
        Enter autosample state Because this is an instrument that must be
        polled we need to ensure the scheduler is added when we are in an
        autosample state. This scheduler raises events to poll the
        instrument for data.
        @retval next_state, (next_state, result)
        """
        next_state = None
        result = []
        self._init_params()
        self._setup_autosample_config()
        # Schedule auto sample task
        self._add_scheduler_event(ScheduledJob.AUTO_SAMPLE, ProtocolEvent.SCHEDULE_ACQUIRE_SAMPLE)
        # Tell driver superclass to send a state change event.
        # Superclass will query the state.
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        return next_state, (next_state, result)

    def _setup_autosample_config(self):
        """
        Set up auto sample configuration and add it to the scheduler.
        """
        # Start the scheduler to poll the instrument for
        # data every sample interval seconds
        job_name = ScheduledJob.AUTO_SAMPLE
        polled_interval = self._param_dict.get(Parameter.INTERVAL)
        config = {
            DriverConfigKey.SCHEDULER: {
                job_name: {
                    DriverSchedulerConfigKey.TRIGGER: {
                        DriverSchedulerConfigKey.TRIGGER_TYPE: TriggerType.INTERVAL,
                        DriverSchedulerConfigKey.SECONDS: polled_interval
                    }
                }
            }
        }
        self.set_init_params(config)
        # Start the scheduler if it is not running
        if not self._scheduler:
            self.initialize_scheduler()

    def _handler_autosample_exit(self, *args, **kwargs):
        """
        Exit auto sample state. Remove the auto sample task
        """
        next_state = None
        result = []
        return next_state, (next_state, result)

    def _handler_autosample_stop_autosample(self, *args, **kwargs):
        """
        Remove the auto sample task. Exit Auto sample state
        """
        next_state = ProtocolState.COMMAND
        result = []
        # Stop the Auto Poll scheduling
        self._remove_scheduler(ScheduledJob.AUTO_SAMPLE)
        return next_state, (next_state, result)

    ########################################################################
    # Direct access handlers.
    ########################################################################
    def _handler_direct_access_enter(self, *args, **kwargs):
        """
        Enter direct access state.
        """
        self._driver_event(DriverAsyncEvent.STATE_CHANGE)
        self._sent_cmds = []

    def _handler_direct_access_exit(self, *args, **kwargs):
        """
        Exit direct access state.
        """
        pass

    def _handler_direct_access_execute_direct(self, data):
        """
        Execute direct command
        """
        next_state = None
        result = []
        self._do_cmd_direct(data)
        # add sent command to list for 'echo' filtering in callback
        self._sent_cmds.append(data)
        return next_state, (next_state, result)

    def _handler_direct_access_stop_direct(self):
        """
        Stop direct access; return to COMMAND state.
        """
        next_state = ProtocolState.COMMAND
        result = []
        return next_state, (next_state, result)

    def _build_simple_command(self, cmd, *args):
        """
        Build handler for basic THSPH commands.
        @param cmd the simple ooicore command to format.
        @retval The command to be sent to the device.
        @throws InstrumentException for unknown commands.
        """
        instrument_series = self._param_dict.get(Parameter.INSTRUMENT_SERIES)
        if cmd == Command.GET_SAMPLE:
            instrument_cmd = self.THSPH_COMMANDS[instrument_series][Command.GET_SAMPLE]
        else:
            raise InstrumentException('Unknown THSPH driver command %s' % cmd)
        return "%s%s" % (instrument_cmd, NEWLINE)

    def _wakeup(self, wakeup_timeout=0, response_timeout=0):
        """
        There is no wakeup for this instrument. Do nothing.
        @param wakeup_timeout The timeout to wake the device.
        @param response_timeout The time to look for response to a wakeup attempt.
        """
        pass
|
|
'''
MFEM example 17
How to run:
python <arguments>
Example of arguments:
ex17.py -m beam-tri.mesh
ex17.py -m beam-quad.mesh
ex17.py -m beam-tet.mesh
ex17.py -m beam-hex.mesh
ex17.py -m beam-quad.mesh -r 2 -o 3
ex17.py -m beam-quad.mesh -r 2 -o 2 -a 1 -k 1
ex17.py -m beam-hex.mesh -r 2 -o 2
'''
import sys
from mfem.common.arg_parser import ArgParser
from os.path import expanduser, join, dirname
import numpy as np
from mfem import path
import mfem.ser as mfem
from mfem.ser import intArray
#
class InitDisplacement(mfem.VectorPyCoefficient):
    """Initial/Dirichlet displacement: zero in every component except the
    last, which is set to -0.2*x[0] (a small downward bend of the beam)."""
    def __init__(self, dim):
        self.dim = dim
        mfem.VectorPyCoefficient.__init__(self, dim)

    def EvalValue(self, x):
        # Bug fix: use self.dim rather than relying on a module-level
        # 'dim' global having been defined before this is evaluated.
        u = [0.0] * self.dim
        u[-1] = -0.2 * x[0]
        return tuple(u)
class StressCoefficient(mfem.PyCoefficientBase):
    """Scalar coefficient evaluating one component (si, sj) of the linear
    elastic stress tensor of a displacement grid function, with piecewise
    Lame coefficients lambda and mu."""
    def __init__(self, lambda_, mu_, si=0, sj=0):
        super(StressCoefficient, self).__init__(0)
        self.lam = lambda_       # lambda coefficient
        self.mu = mu_            # mu coefficient
        self.si, self.sj = si, sj  # stress component indices
        self.u = None            # displacement GridFunction
        self.grad = mfem.DenseMatrix()

    def SetComponent(self, i, j):
        self.si, self.sj = i, j

    def SetDisplacement(self, u):
        self.u = u

    def Eval(self, T, ip):
        i, j = self.si, self.sj
        lam_val = self.lam.Eval(T, ip)
        mu_val = self.mu.Eval(T, ip)
        # Fill self.grad with grad(u) at the current integration point.
        self.u.GetVectorGradient(T, self.grad)
        if i == j:
            # Diagonal component: lambda*div(u) + 2*mu*du_i/dx_i.
            return lam_val * self.grad.Trace() + 2 * mu_val * self.grad[i, i]
        # Off-diagonal component: mu*(du_i/dx_j + du_j/dx_i).
        return mu_val * (self.grad[i, j] + self.grad[j, i])
class VisMan(object):
    """Small manager for GLVis socket windows: opens sockets, forwards
    solutions/commands, and tiles windows in a grid on screen."""
    def __init__(self, vishost, visport):
        self.host = vishost
        self.port = visport
        self.socks = []
        self.output = None          # current socketstream (None = no window)
        self.win_x = 0
        self.win_y = 0
        self.win_w = 200            # window width
        self.win_h = 150            # window height
        self.stride_x = self.win_w
        self.stride_y = self.win_h + 20
        self.win_nx = 4             # number of windows in a row
        self.sid = 0                # number of windows opened so far

    def NewWindow(self):
        self.socks.append(mfem.socketstream(self.host, self.port))
        self.output = self.socks[-1]
        self.output.precision(8)
        # (removed a stray no-op 'self.socks' expression statement)
        self.sid = self.sid + 1

    def CloseConnection(self):
        self.socks = []
        del self.output
        self.output = None

    def PositionWindow(self):
        if self.output is None:
            return
        sid = self.sid
        # Bug fix: use floor division for the row index; under Python 3 the
        # original '/' produced a float and an invalid window_geometry string.
        command = ("window_geometry " +
                   str(self.win_x + self.stride_x*(sid % self.win_nx)) +
                   ' ' +
                   str(self.win_y + self.stride_y*(sid // self.win_nx)) +
                   ' ' + str(self.win_w) + ' ' + str(self.win_h))
        self.output.send_text(command)
        self.output.flush()

    def send_solution(self, mesh, x):
        if self.output is None:
            return
        self.output.send_solution(mesh, x)

    def send_text(self, x):
        if self.output is None:
            return
        self.output.send_text(x)

    def flush(self):
        if self.output is None:
            return
        self.output.flush()
# 1. Parse command-line options.
parser = ArgParser(description='Ex17')
parser.add_argument('-m', '--mesh',
                    default='beam-tri.mesh',
                    action='store', type=str,
                    help='Mesh file to use.')
parser.add_argument('-r', '--refine',
                    action='store', default=-1, type=int,
                    help="Number of times to refine the mesh uniformly, -1 for auto.")
parser.add_argument('-o', '--order',
                    action='store', default=1, type=int,
                    help="Finite element order (polynomial degree)")
parser.add_argument('-a', '--alpha',
                    action='store', default=-1.0, type=float,
                    help='\n'.join(["One of the two DG penalty parameters, typically +1/-1."
                                    " See the documentation of class DGElasticityIntegrator."]))
parser.add_argument('-k', '--kappa',
                    action='store', default=-1.0, type=float,
                    # Typo fix in the user-facing help text: "positve" -> "positive".
                    help='\n'.join(["One of the two DG penalty parameters, should be positive."
                                    " Negative values are replaced with (order+1)^2."]))
parser.add_argument('-vis', '--visualization',
                    action='store_true',
                    help='Enable GLVis visualization')
args = parser.parse_args()
ref_levels = args.refine
order = args.order
alpha = args.alpha
kappa = args.kappa
visualization = args.visualization
# A negative kappa means "use the default" (order+1)^2.
if (kappa < 0):
    kappa = (order+1.)*(order+1.)
args.kappa = kappa
parser.print_options(args)
# 2. Read the mesh from the given mesh file
meshfile = expanduser(
    join(dirname(__file__), '..', 'data', args.mesh))
mesh = mfem.Mesh(meshfile, 1, 1)
dim = mesh.Dimension()
if (mesh.attributes.Max() < 2 or
        mesh.bdr_attributes.Max() < 2):
    print("\n".join(["Input mesh should have at least two materials and ",
                     "two boundary attributes! (See schematic in ex17.cpp)\n"]))
    sys.exit()
# 3. Refine the mesh to increase the resolution.
# Bug fix: honor a user-supplied refinement count (-r); only auto-compute
# when the default of -1 is in effect (matches ex17.cpp, where the formula
# targets at most ~5,000 elements).
if ref_levels < 0:
    ref_levels = int(np.floor(np.log(5000./mesh.GetNE())/np.log(2.)/dim))
for x in range(ref_levels):
    mesh.UniformRefinement()
# Since NURBS meshes do not support DG integrators, we convert them to
# regular polynomial mesh of the specified (solution) order.
if (mesh.NURBSext):
    mesh.SetCurvature(order)
# 4. Define a DG vector finite element space on the mesh. Here, we use
# Gauss-Lobatto nodal basis because it gives rise to a sparser matrix
# compared to the default Gauss-Legendre nodal basis.
fec = mfem.DG_FECollection(order, dim, mfem.BasisType.GaussLobatto)
fespace = mfem.FiniteElementSpace(mesh, fec, dim)
print('Number of finite element unknowns: ' + str(fespace.GetVSize()))
print('Assembling:')
# 5. In this example, the Dirichlet boundary conditions are defined by
# marking boundary attributes 1 and 2 in the marker Array 'dir_bdr'.
# These b.c. are imposed weakly, by adding the appropriate boundary
# integrators over the marked 'dir_bdr' to the bilinear and linear forms.
# With this DG formulation, there are no essential boundary conditions.
# (ess_tdof_list therefore stays empty, but FormLinearSystem still needs it.)
ess_tdof_list = intArray()
dir_bdr = intArray(mesh.bdr_attributes.Max())
dir_bdr.Assign(0)
dir_bdr[0] = 1  # boundary attribute 1 is Dirichlet
dir_bdr[1] = 1  # boundary attribute 2 is Dirichlet
# 6. Define the DG solution vector 'x' as a finite element grid function
# corresponding to fespace. Initialize 'x' using the 'InitDisplacement'
# function.
x = mfem.GridFunction(fespace)
init_x = InitDisplacement(dim)
x.ProjectCoefficient(init_x)
# 7. Set up the Lame constants for the two materials. They are defined as
# piece-wise (with respect to the element attributes) constant
# coefficients, i.e. type PWConstCoefficient. Material 1 (attribute 1)
# is made stiffer (value 50) than the rest (value 1).
lamb = mfem.Vector(mesh.attributes.Max())  # lambda is not possible in python
lamb.Assign(1.0)
lamb[0] = 50.
lambda_c = mfem.PWConstCoefficient(lamb)
mu = mfem.Vector(mesh.attributes.Max())
mu.Assign(1.0)
mu[0] = 50.0
mu_c = mfem.PWConstCoefficient(mu)
# 8. Set up the linear form b(.) which corresponds to the right-hand side of
# the FEM linear system. In this example, the linear form b(.) consists
# only of the terms responsible for imposing weakly the Dirichlet
# boundary conditions, over the attributes marked in 'dir_bdr'. The
# values for the Dirichlet boundary condition are taken from the
# VectorFunctionCoefficient 'x_init' which in turn is based on the
# function 'InitDisplacement'.
b = mfem.LinearForm(fespace)
print('r.h.s ...')
integrator = mfem.DGElasticityDirichletLFIntegrator(
    init_x, lambda_c, mu_c, alpha, kappa)
b.AddBdrFaceIntegrator(integrator, dir_bdr)
b.Assemble()
# 9. Set up the bilinear form a(.,.) on the DG finite element space
# corresponding to the linear elasticity integrator with coefficients
# lambda and mu as defined above. The additional interior face integrator
# ensures the weak continuity of the displacement field. The additional
# boundary face integrator works together with the boundary integrator
# added to the linear form b(.) to impose weakly the Dirichlet boundary
# conditions.
a = mfem.BilinearForm(fespace)
a.AddDomainIntegrator(mfem.ElasticityIntegrator(lambda_c, mu_c))
a.AddInteriorFaceIntegrator(
    mfem.DGElasticityIntegrator(lambda_c, mu_c, alpha, kappa))
a.AddBdrFaceIntegrator(mfem.DGElasticityIntegrator(
    lambda_c, mu_c, alpha, kappa), dir_bdr)
# 10. Assemble the bilinear form and extract the linear system A X = B.
print('matrix ...')
a.Assemble()
A = mfem.SparseMatrix()
B = mfem.Vector()
X = mfem.Vector()
a.FormLinearSystem(ess_tdof_list, x, b, A, X, B)
print('...done')
A.PrintInfo()
'''
Note: extension of ostream &
A.PrintInfo() # output to std::cout
A.PrintInfo("matrix info") # output to file
Above two are the same as
from mfem._ser.io_stream import STDOUT, wFILE
A.PrintInfo(STDOUT)
A.PrintInfo(wFILE("matrix_info"))
'''
# 11. Define a simple symmetric Gauss-Seidel preconditioner and use it to
# solve the system Ax=b with PCG for the symmetric formulation, or GMRES
# for the non-symmetric.
M = mfem.GSSmoother(A)
rtol = 1e-6
if (alpha == -1.0):
mfem.PCG(A, M, B, X, 3, 5000, rtol*rtol, 0.0)
else:
mfem.GMRES(A, M, B, X, 3, 5000, 50, rtol*rtol, 0.0)
# 12. Recover the solution as a finite element grid function 'x'.
a.RecoverFEMSolution(X, b, x)
# 13. Use the DG solution space as the mesh nodal space. This allows us to
# save the displaced mesh as a curved DG mesh.
mesh.SetNodalFESpace(fespace)
reference_nodes = mfem.Vector()
if (visualization):
reference_nodes.Assign(mesh.GetNodes())
# 14. Save the displaced mesh and minus the solution (which gives the
# backward displacements to the reference mesh). This output can be
# viewed later using GLVis: "glvis -m displaced.mesh -g sol.gf".
nodes = mesh.GetNodes()
nodes += x
x.Neg()
mesh.Print('displaced.mesh', 8)
x.Save('sol.gf', 8)
# 15. Visualization: send data by socket to a GLVis server.
if (visualization):
vis = VisMan("localhost", 19916)
glvis_keys = "Rjlc" if (dim < 3) else "c"
vis.NewWindow()
vis.send_solution(mesh, x)
vis.send_text("keys " + glvis_keys)
vis.send_text("window_title 'Deformed configuration'")
vis.send_text("plot_caption 'Backward displacement'")
vis.PositionWindow()
vis.CloseConnection()
c = "xyz"
scalar_dg_space = mfem.FiniteElementSpace(mesh, fec)
stress = mfem.GridFunction(scalar_dg_space)
stress_c = StressCoefficient(lambda_c, mu_c)
mesh.GetNodes().Assign(reference_nodes)
x.Neg()
stress_c.SetDisplacement(x)
def make_plot(si, sj):
stress_c.SetComponent(si, sj)
stress.ProjectCoefficient(stress_c)
vis.NewWindow()
vis.send_solution(mesh, stress)
vis.send_text("keys " + glvis_keys)
vis.send_text("window_title |Stress" + c[si] + c[sj] + "|")
vis.PositionWindow()
vis.CloseConnection()
for si in range(dim):
for jj in range(dim-si):
make_plot(si, si+jj)
|
|
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Array parameter type testcases : INT16 Array
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
UINT16 Array = 16bits signed int array :
- Array size : 5
- values range : [-50, 50]
Test cases :
------------
- Testing nominal case
- Testing minimum
- Testing minimum overflow
- Testing maximum
- Testing maximum overflow
- Testing array index out of bounds
"""
import os
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
from ctypes import c_uint16
class TestCases(PfwTestCase):
    """Black-box tests for the /Test/Test/TEST_DIR/INT16_ARRAY parameter.

    Each test drives the parameter framework through setParameter /
    getParameter and cross-checks the value stored on the filesystem
    blackboard (the backing file holds one hex word per array element).
    """

    def setUp(self):
        # Full PFW path of the parameter and its backing file on disk.
        self.param_name = "/Test/Test/TEST_DIR/INT16_ARRAY"
        self.param_short_name = os.environ["PFW_RESULT"] + "/INT16_ARRAY"
        print('\r')
        self.pfw.sendCmd("setTuningMode", "on")
        print('\r')
        # Array geometry and declared bounds of each INT16 element.
        self.array_size = 5
        self.array_min = -50
        self.array_max = 50

    def tearDown(self):
        self.pfw.sendCmd("setTuningMode", "off")

    def _filesystem_value(self, index):
        """Return the raw hex string stored for element `index` in the backing file."""
        with open(self.param_short_name) as param_file:
            return param_file.read().splitlines()[index]

    def test_Nominal_Case(self):
        """
        Testing INT16_ARRAY Nominal Case
        --------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - Set every INT16_ARRAY element to an authorized value
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - INT16_ARRAY array elements correctly recorded
        - Blackboard and filesystem values checked
        """
        log.D(self.test_Nominal_Case.__doc__)
        for index in range(self.array_size):
            # Spread values across the valid range, clamped to array_max.
            indexed_array_value = index + self.array_min
            if indexed_array_value > self.array_max:
                indexed_array_value = self.array_max
            # c_uint16 yields the two's-complement representation, which is
            # how the filesystem blackboard stores negative values.
            hex_indexed_array_value = hex(c_uint16(indexed_array_value).value)
            # Check parameter value setting
            indexed_array_value_path = "".join([self.param_name, "/", str(index)])
            out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
            assert err is None, log.E("when setting parameter %s[%s]: %s"
                                      % (self.param_name, str(index), err))
            assert out == "Done", log.F("when setting parameter %s[%s]: %s"
                                        % (self.param_name, str(index), out))
            # Check parameter value on blackboard
            out, err = self.pfw.sendCmd("getParameter", str(indexed_array_value_path), "")
            assert err is None, log.E("when setting parameter %s[%s] : %s"
                                      % (self.param_name, str(index), err))
            assert out == str(indexed_array_value), log.F("BLACKBOARD : Incorrect value for %s[%s], expected: %s, found: %s"
                                                          % (self.param_name, str(index), str(indexed_array_value), out))
            # Check parameter value on filesystem
            assert self._filesystem_value(index) == hex_indexed_array_value, log.F("FILESSYSTEM : %s[%s] update error"
                                                                                   % (self.param_name, str(index)))

    def test_Min_Value(self):
        """
        Testing INT16_ARRAY minimum value
        ---------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - Set an INT16_ARRAY element to the minimum value : -50
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - INT16_ARRAY array elements correctly recorded
        - Blackboard and filesystem values checked
        """
        log.D(self.test_Min_Value.__doc__)
        index = 0
        indexed_array_value = self.array_min
        indexed_array_value_path = "".join([self.param_name, "/", str(index)])
        # Two's-complement hex, as stored in the backing file.
        hex_indexed_array_value = hex(c_uint16(indexed_array_value).value)
        # Check parameter value setting
        out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
        assert err is None, log.E("when setting parameter %s[%s]: %s"
                                  % (self.param_name, str(index), err))
        # Fixed: failure message used log.E here while every sibling test
        # uses log.F for an unexpected "out" value.
        assert out == "Done", log.F("when setting parameter %s[%s]: %s"
                                    % (self.param_name, str(index), out))
        # Check parameter value on blackboard
        out, err = self.pfw.sendCmd("getParameter", str(indexed_array_value_path), "")
        assert err is None, log.E("when setting parameter %s[%s] : %s"
                                  % (self.param_name, str(index), err))
        assert out == str(indexed_array_value), log.F("BLACKBOARD : Incorrect value for %s[%s], expected: %s, found: %s"
                                                      % (self.param_name, str(index), str(indexed_array_value), out))
        # Check parameter value on filesystem
        assert self._filesystem_value(index) == hex_indexed_array_value, log.F("FILESSYSTEM : %s[%s] update error"
                                                                               % (self.param_name, str(index)))

    def test_Min_Value_Overflow(self):
        """
        Testing INT16_ARRAY parameter values out of negative range
        ----------------------------------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - Set an INT16_ARRAY element to array_min - 1 (-51)
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - INT16_ARRAY array elements not recorded
        - Error correctly detected
        """
        log.D(self.test_Min_Value_Overflow.__doc__)
        index = 0
        indexed_array_value = self.array_min
        indexed_array_value_path = "".join([self.param_name, "/", str(index)])
        # Write a legal value first; it is the reference that must survive
        # the rejected out-of-bounds write below.
        out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
        assert err is None, log.E("when setting parameter %s[%s]: %s"
                                  % (self.param_name, str(index), err))
        assert out == "Done", log.F("when setting parameter %s[%s]: %s"
                                    % (self.param_name, str(index), out))
        param_check = self._filesystem_value(index)
        # One below the declared minimum must be refused.
        indexed_array_value = indexed_array_value - 1
        out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value), expectSuccess=False)
        assert err is None, log.E("when setting parameter %s[%s]: %s"
                                  % (self.param_name, str(index), err))
        assert out != "Done", log.F("Error not detected when setting parameter %s[%s] out of bounds"
                                    % (self.param_name, str(index)))
        # Filesystem value must be unchanged by the rejected write.
        assert self._filesystem_value(index) == param_check, log.F("FILESSYSTEM : %s[%s] forbiden update"
                                                                   % (self.param_name, str(index)))

    def test_Max_Value(self):
        """
        Testing INT16_ARRAY maximum value
        ---------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - Set an INT16_ARRAY element to the maximum value : 50
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - INT16_ARRAY array elements correctly recorded
        - Blackboard and filesystem values checked
        """
        log.D(self.test_Max_Value.__doc__)
        index = 0
        indexed_array_value = self.array_max
        indexed_array_value_path = "".join([self.param_name, "/", str(index)])
        hex_indexed_array_value = hex(c_uint16(indexed_array_value).value)
        # Check parameter value setting
        out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
        assert err is None, log.E("when setting parameter %s[%s]: %s"
                                  % (self.param_name, str(index), err))
        assert out == "Done", log.F("when setting parameter %s[%s]: %s"
                                    % (self.param_name, str(index), out))
        # Check parameter value on blackboard
        out, err = self.pfw.sendCmd("getParameter", str(indexed_array_value_path), "")
        assert err is None, log.E("when setting parameter %s[%s] : %s"
                                  % (self.param_name, str(index), err))
        assert out == str(indexed_array_value), log.F("BLACKBOARD : Incorrect value for %s[%s], expected: %s, found: %s"
                                                      % (self.param_name, str(index), str(indexed_array_value), out))
        # Check parameter value on filesystem
        assert self._filesystem_value(index) == hex_indexed_array_value, log.F("FILESSYSTEM : %s[%s] update error"
                                                                               % (self.param_name, str(index)))

    def test_Max_Value_Overflow(self):
        """
        Testing INT16_ARRAY parameter values out of positive range
        ----------------------------------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - Set an INT16_ARRAY element to array_max + 1 (51)
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - INT16_ARRAY array elements not recorded
        - Error correctly detected
        """
        log.D(self.test_Max_Value_Overflow.__doc__)
        index = 0
        indexed_array_value = self.array_max
        indexed_array_value_path = "".join([self.param_name, "/", str(index)])
        # Write a legal value first; it is the reference that must survive
        # the rejected out-of-bounds write below.
        out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value))
        assert err is None, log.E("when setting parameter %s[%s]: %s"
                                  % (self.param_name, str(index), err))
        assert out == "Done", log.F("when setting parameter %s[%s]: %s"
                                    % (self.param_name, str(index), out))
        param_check = self._filesystem_value(index)
        # One above the declared maximum must be refused.
        indexed_array_value = indexed_array_value + 1
        out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value), expectSuccess=False)
        assert err is None, log.E("when setting parameter %s[%s]: %s"
                                  % (self.param_name, str(index), err))
        assert out != "Done", log.F("Error not detected when setting parameter %s[%s] out of bounds"
                                    % (self.param_name, str(index)))
        # Filesystem value must be unchanged by the rejected write.
        assert self._filesystem_value(index) == param_check, log.F("FILESSYSTEM : %s[%s] forbiden update"
                                                                   % (self.param_name, str(index)))

    def test_Array_Index_Overflow(self):
        """
        Testing Array index out of bounds
        ---------------------------------
        Test case description :
        ~~~~~~~~~~~~~~~~~~~~~~~
        - Set an out of bounds array indexed element
        Tested commands :
        ~~~~~~~~~~~~~~~~~
        - [setParameter] function
        Used commands :
        ~~~~~~~~~~~~~~~
        - [getParameter] function
        Expected result :
        ~~~~~~~~~~~~~~~~~
        - INT16_ARRAY array elements not recorded
        - Error correctly detected
        """
        log.D(self.test_Array_Index_Overflow.__doc__)
        # Last valid index must succeed; past-the-end and negative must fail.
        index_values = (self.array_size - 1, self.array_size + 1, -1)
        for index in index_values:
            print(index)
            indexed_array_value = self.array_max
            indexed_array_value_path = "".join([self.param_name, "/", str(index)])
            # expectSuccess=None: outcome depends on the index being probed.
            out, err = self.pfw.sendCmd("setParameter", str(indexed_array_value_path), str(indexed_array_value), expectSuccess=None)
            if index in [0, self.array_size - 1]:
                assert err is None, log.E("when setting parameter %s[%s]: %s"
                                          % (self.param_name, str(index), err))
                assert out == "Done", log.F("when setting parameter %s[%s]: %s"
                                            % (self.param_name, str(index), out))
            else:
                assert err is None, log.E("when setting parameter %s[%s]: %s"
                                          % (self.param_name, str(index), err))
                assert out != "Done", log.F("Error not detected when setting array %s index out of bounds"
                                            % (self.param_name))
|
|
from tweet_preprocess import load_df
from textblob import TextBlob
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import seaborn as sns
import cartopy
# set graph display options
pd.set_option('display.max_colwidth', 200)
# NOTE(review): pd.options.display.mpl_style and the DataFrame.sort()/
# Series.sort() calls below only exist in old pandas (< 0.20). On modern
# pandas use matplotlib styles and sort_values() — confirm target version.
pd.options.display.mpl_style = 'default'
matplotlib.style.use('ggplot')
sns.set_context('talk')
sns.set_style('darkgrid')
# load captured tweets
# NOTE(review): hard-coded local path — the script only runs on this machine.
df = load_df('/Users/alanseciwa/Desktop/results3.csv')
# See the overall count relating to the keys
df.info()
# prints out first row from tweets
print(df[['candidate', 'created_at', 'lang', 'place', 'user_followers_count',
    'user_time_zone', 'polarity', 'influenced_polarity', 'text']].head(1))
# find polarity of ONLY english words and display the set
# the textblob function translate() could be used
english_df = df[df.lang == 'en']
# NOTE(review): bare expressions like the next line only display output in a
# notebook/REPL; as a script they are no-ops.
english_df.sort('polarity', ascending=False).head(3)[['candidate', 'polarity', 'subjectivity', 'text']]
# Find mean polarity for each candidate by looking at the influenced_polarity.
# this takes into account the number of retweets and number of followers
candidate_group = english_df.groupby('candidate')
print(candidate_group[['polarity', 'influence', 'influenced_polarity']].mean())
# Look at the influential Tweets about Donald Trump and Bernie Sanders
'''
jeb = candidate_group.get_group('Jeb Bush')
jeb_influence = jeb.sort('influence', ascending=False)
print('')
print('-----------')
print('Jeb Bush')
print('-----------')
print(jeb_influence[['influence', 'polarity', 'influenced_polarity', 'user_name', 'text', 'created_at']].head(5))
print('')
print('-----------')
print(df[df.user_name == 'Jeb Bush'].groupby('candidate').size())
'''
# Trump
trump = candidate_group.get_group('Donald Trump')
trump_influence = trump.sort('influence', ascending=False)
print('--------------')
print('Donald Trump')
print('--------------')
trump_influence[['influence', 'polarity', 'influenced_polarity', 'user_name', 'text', 'created_at']].head(5)
# Sanders
sanders = candidate_group.get_group('Bernie Sanders')
sanders_influence = sanders.sort('influence', ascending=False)
print('--------------')
print('Bernie Sanders')
print('--------------')
sanders_influence[['influence', 'polarity', 'influenced_polarity', 'user_name', 'text', 'created_at']].head(5)
# LANGUAGE
# display who are all twitter from different languages
print('')
print('Language')
lang_group = df.groupby(['candidate', 'lang'])
print(lang_group.size())
# graph the languages
print('')
# Keep only (candidate, lang) groups with more than 10 tweets.
l_lang = lang_group.filter(lambda group: len(group) > 10)
# get rid of english language
non_eng = l_lang[l_lang.lang != 'en']
non_eng_grp = non_eng.groupby(['lang', 'candidate'], as_index = False)
non_eng_grp
print('')
print('ploting...')
# Tweet counts per (lang, candidate), pivoted for the heatmap.
s = non_eng_grp.text.agg(np.size)
s = s.rename(columns={'text': 'count'})
s_pivot_dis = s.pivot_table(index='lang', columns='candidate', values='count', fill_value=0)
plot = sns.heatmap(s_pivot_dis)
plot.set_title('Number of non-English Tweets by Candidate')
plot.set_ylabel('language code')
plot.set_xlabel('candidate')
plot.figure.set_size_inches(12, 7)
print('')
print('ending plotting')
# Time-influence polarity over time for each candidate
mean_pol = df.groupby(['candidate', 'created_at']).influenced_polarity.mean()
# NOTE(review): resample('60min') without an explicit aggregation relies on
# old-pandas default (mean); modern pandas requires .resample(...).mean().
plot = mean_pol.unstack('candidate').resample('60min').plot()
plot.set_title('Influence Polarity Over Time for Candidates')
plot.set_ylabel('Influence Polarity')
plot.set_xlabel('Time')
plot.figure.set_size_inches(15, 9)
# Get top languages
# Languages above the 75th percentile of tweet counts, minus 'und' (undefined).
lang_size =df.groupby('lang').size()
th = lang_size.quantile(.75)
top_lang_df = lang_size[lang_size > th]
top_lang = set(top_lang_df.index) - {'und'}
print(top_lang)
# Get tweet frequency
# NOTE(review): lambda parameter shadows a typical `datetime` module name;
# it is a per-row timestamp here.
df['hour'] = df.created_at.apply(lambda datetime: datetime.hour)
for lang_code in top_lang:
    l_df = df[df.lang == lang_code]
    # Share of that language's tweets falling in each hour of the day.
    normalized_freq = l_df.groupby('hour').size() / l_df.lang.count()
    plot = normalized_freq.plot(label = lang_code)
plot.set_title('Tweet Frequency by hour of day')
plot.set_ylabel('frequency')
plot.set_xlabel('hr of day')
plot.legend()
plot.figure.set_size_inches(10, 8)
# find the uniqueness of tweets
# 'in' is the ISO 639-1 code for Indonesian on old Twitter data.
spike_interest = df[(df.hour == 23) & (df.lang == 'in')]
print('Number of tweets:', spike_interest.text.count())
print('Number of unique users:', spike_interest.user_name.unique().size)
#investigate spike from Indonesia
spike_interest.text.head(10).unique()
# Find the Timezone of tweets in different locations with Influenced_Polarity
timez_df = english_df.dropna(subset=['user_time_zone'])
us_timez_df = timez_df[timez_df.user_time_zone.str.contains('US & Canada')]
us_timez_candidate_group = us_timez_df.groupby(['candidate', 'user_time_zone'])
us_timez_candidate_group.influenced_polarity.mean()
# Graph timezone on a map
# NOTE(review): hard-coded shapefile path (tz_world timezone boundaries).
timez_map = cartopy.io.shapereader.Reader("/Users/alanseciwa/Desktop/World_Maps/tz_world_mp.shp")
timez_rec = list(timez_map.records())
# Map Twitter's US timezone labels to tz database identifiers.
timez_trans = {
    'Eastern Time (US & Canada)': 'America/New_York',
    'Central Time (US & Canada)': 'America/Chicago',
    'Mountain Time (US & Canada)': 'America/Denver',
    'Pacific Time (US & Canada)': 'America/Los_Angeles',
}
# First shapefile record whose TZID matches each timezone of interest.
america_timez_rec = {
    timez_name: next(filter(lambda record: record.attributes['TZID'] == timez_id, timez_rec))
    for timez_name, timez_id
    in timez_trans.items()
}
# -----
aea = cartopy.crs.AlbersEqualArea(-95, 35)
pc = cartopy.crs.PlateCarree()
state_province = cartopy.feature.NaturalEarthFeature(
    category='cultural',
    name='admin_1_states_provinces_lines',
    scale='50m',
    facecolor='none'
)
# One colormap per candidate; polarity normalized to [0, 40] for shading.
c_map = [matplotlib.cm.Blues, matplotlib.cm.Greens, matplotlib.cm.Reds, matplotlib.cm.Oranges]
norm = matplotlib.colors.Normalize(vmin=0, vmax=40)
candidates = df['candidate'].unique()
# NOTE(review): assumes at most 4 candidates — c_map/colors are indexed by i.
for i, c in enumerate(candidates):
    plt.figure()
    plot = plt.axes(projection=aea)
    plot.set_extent((-125, -66, 20, 50))
    plot.add_feature(cartopy.feature.LAND)
    plot.add_feature(cartopy.feature.COASTLINE)
    plot.add_feature(cartopy.feature.BORDERS)
    plot.add_feature(state_province, edgecolor='gray')
    plot.add_feature(cartopy.feature.LAKES, facecolor='#00BCD4')
    # Shade each US timezone polygon by the candidate's mean polarity there.
    for j, r in america_timez_rec.items():
        timez_spec_df = us_timez_df[us_timez_df.user_time_zone == j]
        timez_cand_spec_df = timez_spec_df[timez_spec_df.candidate == c]
        mean_pol = timez_cand_spec_df.influenced_polarity.mean()
        plot.add_geometries(
            [r.geometry],
            crs = pc,
            color = c_map[i](norm(mean_pol)),
            alpha = 0.8
        )
    plot.set_title('Influenced Polarity towards {} by U.S. Timezone'.format(c))
    plot.figure.set_size_inches(7, 4)
    plt.show()
    print()
# Find the Twitter users outside of the U.S.
american_timez = ('US & Canada|Canada|Arizona|America|Hawaii|Indiana|Alaska'
    '|New_York|Chicago|Los_Angeles|Detroit|CST|PST|EST|MST')
foreign_timez_df = timez_df[~timez_df.user_time_zone.str.contains(american_timez)]
foreign_timez_grp = foreign_timez_df.groupby('user_time_zone')
foreign_timez_grp.size().sort(inplace=False, ascending=False).head(25)
# find Foreign timezones and influenced_polarity of candidates
foreign_english_timez_df = foreign_timez_df[foreign_timez_df.lang == 'en']
foreign_timez_grp2 = foreign_english_timez_df.groupby(['candidate', 'user_time_zone'])
# Keep only (candidate, timezone) groups with more than 40 tweets.
top_foreign_timez_df = foreign_timez_grp2.filter(lambda group: len(group) > 40)
top_foreign_timez_grp = top_foreign_timez_df.groupby(['user_time_zone', 'candidate'], as_index=False)
mean_infl_pol = top_foreign_timez_grp.influenced_polarity.mean()
pivot = mean_infl_pol.pivot_table(
    index='user_time_zone',
    columns='candidate',
    values='influenced_polarity',
    fill_value=0
)
plot = sns.heatmap(pivot)
plot.set_title('Influenced Polarity in Major Foreign (timezones) Regions by Candidate')
plot.set_ylabel('City', family='Ubuntu')
plot.set_xlabel('Influenced Polarity by Candidate')
plot.figure.set_size_inches(10, 9)
# Find the Geolocation of Tweets made
geo_df = df.dropna(subset=['place'])
mollweide = cartopy.crs.Mollweide()
plot = plt.axes(projection=mollweide)
plot.set_global()
plot.add_feature(cartopy.feature.LAND)
plot.add_feature(cartopy.feature.COASTLINE)
plot.add_feature(cartopy.feature.BORDERS)
plot.scatter(
    list(geo_df.longitude),
    list(geo_df.latitude),
    transform=pc,
    zorder=2
)
plot.set_title('International Twitter Users W/Enabled Geo Data')
plot.figure.set_size_inches(14, 9)
# Plot Twitter user in the US
plot = plt.axes(projection=aea)
## this set the size of the map. If other portions of the
## map need to be accessed, the these coordinates
plot.set_extent((-150, 60, -25, 60))
# <fix> need to fix, state border lines are not showing
plot.add_feature(state_province, edgecolor='black')
plot.add_feature(cartopy.feature.COASTLINE)
plot.add_feature(cartopy.feature.LAND)
plot.add_feature(cartopy.feature.BORDERS)
plot.add_feature(cartopy.feature.LAKES)
candidate_grp2 = geo_df.groupby('candidate', as_index = False)
# Colors for the legend table
colors = ['#DC143C', '#0000FF', '#FFD700', '#9932CC']
# Go through loop to display the coordinates
for i, (can, grp) in enumerate(candidate_grp2):
    longitudes = grp.longitude.values
    latitudes = grp.latitude.values
    plot.scatter(
        longitudes,
        latitudes,
        transform=pc,
        color=colors[i],
        label=can,
        zorder=2
    )
plot.set_title('Twitter Users by Candidate')
plt.legend(loc='lower right')
plot.figure.set_size_inches(12, 7)
|
|
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functional LSTM RNN layer with LSTM cells fused into FRNN to run input ...
projections in parallel.
"""
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import compat as tf
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_layer
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import recurrent
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import rnn_cell
from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import rnn_layers
class LSTMCellExt(object):
  """Mixin extending LSTM-based cells for parallel input projection.

  Adds methods that apply the input projection to the whole sequence at
  once (a single einsum over [T, B, D]) instead of per step, so only the
  recurrent hidden-state projection remains inside the unrolled loop.
  Expects the host cell to provide `params`, `_ResetState` and `_Gates`.
  """
  def ProjectInputSequence(self, theta, inputs):
    """Applies input projection for the entire sequence.

    Args:
      theta: a NestedMap of layer weights. Notably, it's expected to contain
        separate weight tensors for input and hidden state projections, for
        performance reasons, under the keys 'wm_i' (input) and 'wm_h'
        (hidden state).
      inputs: A NestedMap with the following fields:
        - act: A list of Tensors of shape [seqlen, batch, input_dim].

    Returns:
      A Tensor of shape [seqlen, batch, 4 * hidden_dim].
    """
    assert isinstance(inputs.act, list)
    # Multiple input streams are concatenated along the feature dimension.
    if len(inputs.act) > 1:
      x = tf.concat(inputs.act, -1)
    else:
      x = inputs.act[0]
    # [T, B, 4 * H]
    proj_inputs = tf.einsum('TBD,DH->TBH', x, theta.wm_i)
    return proj_inputs
  def _MixWithProjectedInput(self, theta, state0, inputs):
    """Computes _Mix() with inputs already projected.

    Args:
      theta: a NestedMap of layer weights. Notably, it's expected to contain
        separate weight tensors for input and hidden state projections, for
        performance reasons, under the keys 'wm_i' (input) and 'wm_h'
        (hidden state).
      state0: A NestedMap with the same structure as return value of
        `self.zero_state()`.
      inputs: A Tensor of shape [batch, 4 * hidden_dim].

    Returns:
      A Tensor of the same shape as `inputs`.
    """
    # Only the recurrent term is computed here; the input projection was
    # already applied sequence-wide by ProjectInputSequence.
    proj_m = tf.matmul(state0.m, theta.wm_h)
    return inputs + proj_m
  def FPropWithProjectedInput(self, theta, state0, inputs):
    """FProp with inputs already projected.

    This method is for parallelizing the input projection across time steps to
    accelerate training.

    The following are equivalent:

    >>> inputs = <a tensor of [T, B, D]>
    >>> paddings = tf.zeros([T, B])
    >>> theta = cell.theta
    >>> state = cell.zero_state(theta, B)
    # a. Use FProp().
    >>> for i in range(T):
    ...   state, _ = cell.FProp(theta, inputs[i, :, :], paddings, state)
    # b. Use FPropWithProjectedInput().
    >>> proj_inputs = cell.ProjectInputSequence(theta, inputs)
    >>> for i in range(T):
    ...   state, _ = cell.FPropWithProjectedInput(
    ...       theta, proj_inputs[i, :, :], paddings, state)

    Args:
      theta: a NestedMap of layer weights. Notably, it's expected to contain
        separate weight tensors for input and hidden state projections, for
        performance reasons, under the keys 'wm_i' (input) and 'wm_h' (hidden
        state).
      state0: A NestedMap with the same structure as return value of
        `self.zero_state()`.
      inputs: A NestedMap with the following fields:
        - proj_inputs: A single Tensor of shape [batch, 4 * hidden_dim].
        - padding: A Tensor of shape [batch, 1].
        - reset_mask: A Tensor of shape [batch, 1].

    Returns:
      state1: A NestedMap of the same structure as `state0`.
      extras: Intermediate results to facilitate backprop. A NestedMap.
    """
    if self.params.reset_cell_state:
      state0_modified = self._ResetState(state0.DeepCopy(), inputs)
    else:
      state0_modified = state0
    xmw = self._MixWithProjectedInput(theta, state0_modified,
                                     inputs.proj_inputs)
    # _Gates expects an 'act' field; reuse the projected inputs for it.
    gates_input = inputs.copy()
    gates_input.act = [inputs.proj_inputs]
    state1 = self._Gates(xmw, theta, state0_modified, gates_input)
    return state1, py_utils.NestedMap()
class LSTMCellSimpleExt(rnn_cell.LSTMCellSimple, LSTMCellExt):
  """LSTMCellSimple extended with sequence-level input projection.

  Behavior comes entirely from the two bases; LSTMCellExt contributes
  ProjectInputSequence / FPropWithProjectedInput.
  """
  pass
class LayerNormalizedLSTMCellSimpleExt(rnn_cell.LayerNormalizedLSTMCellSimple,
                                       LSTMCellExt):
  """LayerNormalizedLSTMCellSimple extended with sequence-level input projection.

  Behavior comes entirely from the two bases; LSTMCellExt contributes
  ProjectInputSequence / FPropWithProjectedInput.
  """
  pass
class LayerNormalizedLSTMCellLeanExt(rnn_cell.LayerNormalizedLSTMCellLean,
                                     LSTMCellExt):
  """LayerNormalizedLSTMCellLean extended with sequence-level input projection.

  Behavior comes entirely from the two bases; LSTMCellExt contributes
  ProjectInputSequence / FPropWithProjectedInput.
  """
  pass
class LstmFRNN(base_layer.BaseLayer):
  """A FRNN for LSTMCellSimple or LayerNormalizedLSTMCellLean cell.

  It exploits the parallelism in input projection across time steps, and is in
  general faster than the combination of LayerNormalizedLSTMCellLean and FRNN.
  """
  @classmethod
  def Params(cls):
    """Layer hyperparams: wrapped cell config plus packed-input/reverse flags."""
    p = super(LstmFRNN, cls).Params()
    p.Define('packed_input', False, 'To reset states for packed inputs.')
    p.Define(
        'cell', None,
        'Configs for the RNN cell. Supported classes are LSTMCellSimpleExt, '
        'LayerNormalizedLSTMCellLeanExt.')
    p.Define('reverse', False,
             'Whether or not to unroll the sequence in reversed order.')
    return p
  @base_layer.initializer
  def __init__(self, params):
    super(LstmFRNN, self).__init__(params)
    p = self.params
    # Only the *Ext cells implement the projected-input protocol used below.
    if p.cell.cls not in (LSTMCellSimpleExt, LayerNormalizedLSTMCellSimpleExt,
                          LayerNormalizedLSTMCellLeanExt):
      raise ValueError(
          'Only LSTMCellSimpleExt, LayerNormalizedLSTMCellSimpleExt and '
          'LayerNormalizedLSTMCellLeanExt are supported, got {}.'.format(
              p.cell.cls.__name__))
    self.CreateChild('cell', p.cell)
  def zero_state(self, theta, batch_size):
    # Delegate zero-state construction to the wrapped cell.
    return self.cell.zero_state(theta.cell, batch_size)
  def FProp(self, theta, inputs, paddings, state0=None, segment_id=None):
    """Computes LSTM forward pass.

    Args:
      theta: A `.NestedMap` object containing weights' values of this layer and
        its children layers.
      inputs: A single tensor or a tuple of tensors with cardinality equal to
        rnn_cell.inputs_arity. For every input tensor, the first dimension is
        assumed to be time, second dimension batch, and third dimension depth.
      paddings: A tensor. First dim is time, second dim is batch, and third dim
        is expected to be 1.
      state0: If not None, the initial rnn state in a `.NestedMap`. Defaults to
        the cell's zero-state.
      segment_id: A tensor to support packed inputs. First dim is time, second
        dim is batch, and third dim is expected to be 1.

    Returns:
      A tuple of (a tensor of [time, batch, dims], the final recurrent state).
    """
    p = self.params
    rcell = self.cell
    assert isinstance(rcell, (rnn_cell.RNNCell))
    if not isinstance(inputs, (list, tuple)):
      inputs = [inputs]
    # Slicing wm to wm_{i,h} outside the loop to get 20% speedup over regular
    # LSTM baseline.
    # Keeping slicing within the loop gives only < 3% speedup.
    cell_theta = theta.cell.copy()
    num_input_nodes = p.cell.num_input_nodes
    cell_theta['wm_i'] = cell_theta.wm[:num_input_nodes, :]
    cell_theta['wm_h'] = cell_theta.wm[num_input_nodes:, :]
    tf.logging.vlog(1, 'cell_theta: %r', cell_theta)
    if p.packed_input:
      assert segment_id is not None
      reset_mask = rnn_layers.GeneratePackedInputResetMask(
          segment_id, is_reverse=False)
      reset_mask = py_utils.HasShape(reset_mask, tf.shape(paddings))
    else:
      reset_mask = tf.zeros_like(paddings)
    # For reversed unrolling, flip inputs/paddings/mask on the time axis,
    # run forward, then flip the activations back at the end.
    if p.reverse:
      inputs = [tf.reverse(x, [0]) for x in inputs]
      paddings = tf.reverse(paddings, [0])
      reset_mask = tf.reverse(reset_mask, [0])
    if not state0:
      batch_size = py_utils.GetShape(paddings)[1]
      state0 = rcell.zero_state(cell_theta, batch_size)
    # [T, B, H]
    proj_inputs = rcell.ProjectInputSequence(cell_theta,
                                             py_utils.NestedMap(act=inputs))
    proj_inputs = py_utils.NestedMap(
        proj_inputs=proj_inputs, padding=paddings, reset_mask=reset_mask)
    acc_state, final_state = recurrent.Recurrent(
        theta=cell_theta,
        state0=state0,
        inputs=proj_inputs,
        cell_fn=rcell.FPropWithProjectedInput,
        cell_type=rcell.layer_type,
        accumulator_layer=self,
        allow_implicit_capture=p.allow_implicit_capture)
    act = rcell.GetOutput(acc_state)
    if p.reverse:
      act = tf.reverse(act, [0])
    return act, final_state
|
|
import nose
import unittest
from soap.datatype import auto_type, int_type, float_type, IntegerArrayType
from soap.expression import expression_factory, operators, Variable, Subscript
from soap.semantics import IntegerInterval, ErrorSemantics
from soap.program.flow import (
AssignFlow, IfFlow, WhileFlow, ForFlow, CompositionalFlow,
PragmaInputFlow, PragmaOutputFlow, ProgramFlow
)
from soap.parser import stmt_parse, expr_parse, parse
class Base(unittest.TestCase):
    """Shared fixtures: typed variables, small integer constants and a
    declaration map used by the expression/statement parser tests."""
    def setUp(self):
        self.a = Variable('a', IntegerArrayType([10]))
        self.w = Variable('w', int_type)
        # Auto-typed scalars used throughout the parser tests.
        self.x, self.y, self.z = (
            Variable(name, auto_type) for name in ('x', 'y', 'z'))
        # Constant intervals 1, 2 and 3.
        self.i1, self.i2, self.i3 = (
            IntegerInterval(value) for value in (1, 2, 3))
        self.decl = {var.name: auto_type for var in (self.x, self.y, self.z)}
        # Convenience wrapper binding the declaration map to expr_parse.
        self.expr_parse = lambda expr: expr_parse(expr, self.decl)
class TestExpressionParser(Base):
    """Checks that expression strings parse into the expected expression ASTs
    built directly with expression_factory."""
    def test_compound_boolean_expression(self):
        """`!(x < 3) && y != 1` parses as AND(NOT(x < 3), y != 1)."""
        bool_expr_1 = expression_factory(
            operators.UNARY_NEGATION_OP, expression_factory(
                operators.LESS_OP, self.x, self.i3))
        bool_expr_2 = expression_factory(
            operators.NOT_EQUAL_OP, self.y, self.i1)
        bool_expr = expression_factory(
            operators.AND_OP, bool_expr_1, bool_expr_2)
        self.assertEqual(expr_parse('!(x < 3) && y != 1'), bool_expr)
    def test_operator_precedence(self):
        """Unary minus and `*` bind tighter than `+`; parentheses override."""
        neg_y = expression_factory(operators.UNARY_SUBTRACT_OP, self.y)
        expr = expression_factory(
            operators.ADD_OP, self.x,
            expression_factory(operators.MULTIPLY_OP, neg_y, self.z))
        self.assertEqual(expr_parse('x + -y * z'), expr)
        expr = expression_factory(
            operators.MULTIPLY_OP,
            expression_factory(operators.ADD_OP, self.x, neg_y),
            self.z)
        self.assertEqual(expr_parse('(x + -y) * z'), expr)
    def test_special_unary_arithmetic_expression(self):
        """`exp(...)` parses as the unary EXPONENTIATE operator."""
        xpy = expression_factory(operators.ADD_OP, self.x, self.y)
        expr = expression_factory(operators.EXPONENTIATE_OP, xpy)
        self.assertEqual(expr_parse('exp(x + y)'), expr)
    def test_select_expression(self):
        """C-style ternary `cond ? a : b` parses as TERNARY_SELECT."""
        expr = expression_factory(
            operators.TERNARY_SELECT_OP,
            expression_factory(operators.LESS_OP, self.x, self.i3),
            expression_factory(operators.ADD_OP, self.y, self.i1),
            expression_factory(operators.MULTIPLY_OP, self.y, self.i2))
        self.assertEqual(expr_parse('x < 3 ? y + 1 : y * 2'), expr)
    def test_variable_subscript(self):
        """Subscripts: literal, expression, multi-dimensional and nested."""
        expr = expression_factory(
            operators.INDEX_ACCESS_OP, self.x, Subscript(self.i1))
        self.assertEqual(expr_parse('x[1]'), expr)
        expr = expression_factory(
            operators.INDEX_ACCESS_OP, self.x,
            Subscript(expression_factory(operators.ADD_OP, self.y, self.i1)))
        self.assertEqual(expr_parse('x[y + 1]'), expr)
        # x[y][1]: one Subscript carrying both indices.
        expr = expression_factory(
            operators.INDEX_ACCESS_OP, self.x, Subscript(self.y, self.i1))
        self.assertEqual(expr_parse('x[y][1]'), expr)
        # x[y[1]]: the index is itself an indexing expression.
        expr = expression_factory(
            operators.INDEX_ACCESS_OP, self.x,
            Subscript(expression_factory(
                operators.INDEX_ACCESS_OP, self.y, Subscript(self.i1))))
        self.assertEqual(expr_parse('x[y[1]]'), expr)
class TestStatementParser(Base):
    """Checks that statement strings parse into the expected flow objects."""
    def setUp(self):
        super().setUp()
        # Convenience wrapper binding the declaration map to stmt_parse.
        self.stmt_parse = lambda prog: stmt_parse(prog, self.decl)
    def test_assign_statement(self):
        """Plain assignment parses as AssignFlow(target, expr)."""
        expr = expression_factory(
            operators.ADD_OP, self.y, self.i1)
        flow = AssignFlow(self.x, expr)
        self.assertEqual(self.stmt_parse('x = y + 1;'), flow)
    def test_boolean_assign_statement(self):
        """Assigning a boolean expression (currently skipped)."""
        raise nose.SkipTest # can't bother with this now
        expr = expression_factory(
            operators.LESS_EQUAL_OP, self.y, self.i1)
        flow = AssignFlow(self.x, expr)
        self.assertEqual(self.stmt_parse('x = y < 1;'), flow)
    def test_declaration_assign_statement(self):
        """Declaration with initializer parses as a plain AssignFlow."""
        flow = AssignFlow(self.w, self.i1)
        self.assertEqual(self.stmt_parse('int w = 1;'), flow)
    def test_declaration_statement(self):
        """Bare declarations (scalar and 2-D array) must parse cleanly."""
        self.stmt_parse('int w;')
        self.stmt_parse('float a[10][10];')
    def test_operator_assign_statement(self):
        """`x += 1` desugars to `x = x + 1`."""
        expr = expression_factory(
            operators.ADD_OP, self.x, self.i1)
        flow = AssignFlow(self.x, expr)
        self.assertEqual(self.stmt_parse('x += 1;'), flow)
    def test_increment_statement(self):
        """`x++` desugars to `x = x + 1`."""
        expr = expression_factory(
            operators.ADD_OP, self.x, self.i1)
        flow = AssignFlow(self.x, expr)
        self.assertEqual(self.stmt_parse('x++;'), flow)
    def test_if_statement(self):
        """Braced if, with and without an else branch."""
        bool_expr = expression_factory(operators.LESS_OP, self.x, self.i3)
        assign_flow_1 = AssignFlow(self.y, self.x)
        assign_flow_2 = AssignFlow(self.x, self.y)
        flow = IfFlow(bool_expr, assign_flow_1)
        self.assertEqual(self.stmt_parse('if (x < 3) {y = x;}'), flow)
        flow = IfFlow(bool_expr, assign_flow_1, assign_flow_2)
        self.assertEqual(
            self.stmt_parse('if (x < 3) {y = x;} else {x = y;}'), flow)
    def test_single_line_if_statement(self):
        """Unbraced single-statement if/else parses to the same flows."""
        bool_expr = expression_factory(operators.LESS_OP, self.x, self.i3)
        assign_flow_1 = AssignFlow(self.y, self.x)
        assign_flow_2 = AssignFlow(self.x, self.y)
        flow = IfFlow(bool_expr, assign_flow_1)
        self.assertEqual(self.stmt_parse('if (x < 3) y = x;'), flow)
        flow = IfFlow(bool_expr, assign_flow_1, assign_flow_2)
        self.assertEqual(
            self.stmt_parse('if (x < 3) y = x; else x = y;'), flow)
    def test_while_statement(self):
        """Braced while loop parses as WhileFlow(cond, body)."""
        bool_expr = expression_factory(operators.LESS_OP, self.x, self.i3)
        assign_flow = AssignFlow(self.y, self.x)
        flow = WhileFlow(bool_expr, assign_flow)
        self.assertEqual(self.stmt_parse('while (x < 3) {y = x;}'), flow)
    def test_single_line_while_statement(self):
        """Unbraced single-statement while parses to the same flow."""
        bool_expr = expression_factory(operators.LESS_OP, self.x, self.i3)
        assign_flow = AssignFlow(self.y, self.x)
        flow = WhileFlow(bool_expr, assign_flow)
        self.assertEqual(self.stmt_parse('while (x < 3) y = x;'), flow)
    def test_for_statement(self):
        """For loop parses as ForFlow(init, cond, incr, body)."""
        bool_expr = expression_factory(operators.LESS_OP, self.x, self.i3)
        init_flow = AssignFlow(self.x, self.i1)
        incr_flow = AssignFlow(self.x, expression_factory(
            operators.ADD_OP, self.x, self.i1))
        assign_flow = AssignFlow(self.y, self.x)
        flow = ForFlow(init_flow, bool_expr, incr_flow, assign_flow)
        parsed_flow = self.stmt_parse('for (x = 1; x < 3; x = x + 1) {y = x;}')
        self.assertEqual(parsed_flow, flow)
    def test_single_line_for_statement(self):
        """Unbraced single-statement for parses to the same flow."""
        bool_expr = expression_factory(operators.LESS_OP, self.x, self.i3)
        init_flow = AssignFlow(self.x, self.i1)
        incr_flow = AssignFlow(self.x, expression_factory(
            operators.ADD_OP, self.x, self.i1))
        assign_flow = AssignFlow(self.y, self.x)
        flow = ForFlow(init_flow, bool_expr, incr_flow, assign_flow)
        parsed_flow = self.stmt_parse('for (x = 1; x < 3; x = x + 1) y = x;')
        self.assertEqual(parsed_flow, flow)
    def test_compound_statement(self):
        """Sequenced statements parse as a CompositionalFlow."""
        flow = CompositionalFlow(
            [AssignFlow(self.y, self.x), AssignFlow(self.x, self.y)])
        self.assertEqual(self.stmt_parse('y = x; x = y;'), flow)
class TestProgramParser(Base):
    """Checks parsing of a full program with #pragma soap input/output."""
    def setUp(self):
        super().setUp()
        # Unlike Base, this suite uses concrete (non-auto) types.
        self.w = Variable('w', float_type)
        self.x = Variable('x', int_type)
        self.y = Variable('y', float_type)
        self.z = Variable('z', float_type)
        self.decl = {
            'x': int_type,
            'y': float_type,
            'z': float_type,
        }
    def test_full(self):
        """A program with input/output pragmas parses into a ProgramFlow
        whose inputs, outputs and body all match the expectation."""
        expr = expression_factory(
            operators.ADD_OP, expression_factory(
                operators.ADD_OP, self.x, self.y), self.z)
        # Expected inputs: x as a constant, y and z as value/error ranges.
        inputs = [
            (self.x, self.i1),
            (self.y, ErrorSemantics([3.0, 4.0], [0, 0])),
            (self.z, ErrorSemantics([5, 6], [0, 0])),
        ]
        outputs = [self.w]
        body = CompositionalFlow([
            PragmaInputFlow(inputs),
            PragmaOutputFlow(outputs),
            AssignFlow(self.w, expr),
        ])
        flow = ProgramFlow(body)
        prog = """
        #pragma soap input \
            int x=1, float y=[3.0, 4.0], float z=[5.0, 6.0][0, 0]
        #pragma soap output w
        float w = x + y + z;
        """
        parsed_flow = parse(prog)
        self.assertListEqual(list(parsed_flow.inputs.items()), inputs)
        self.assertListEqual(parsed_flow.outputs, outputs)
        self.assertEqual(parsed_flow, flow)
|
|
# Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import eventlet
from swift.common.utils import cache_from_env, get_logger
from swift.proxy.controllers.base import get_container_memcache_key
from swift.common.memcached import MemcacheConnectionError
from swift.common.swob import Request, Response
class MaxSleepTimeHitError(Exception):
    """Raised by _get_sleep_time when the computed ratelimit sleep would
    exceed max_sleep_time_seconds; handle_ratelimit answers 498 instead."""
    pass
class RateLimitMiddleware(object):
    """
    Rate limiting middleware

    Rate limits requests on both an Account and Container level. Limits are
    configurable.
    """

    # Seconds to sleep before answering a blacklisted account (slows abusers).
    BLACK_LIST_SLEEP = 1

    def __init__(self, app, conf, logger=None):
        """
        :param app: next WSGI application in the pipeline
        :param conf: configuration dict (paste.deploy filter section)
        :param logger: optional logger; created from conf when omitted
        """
        self.app = app
        if logger:
            self.logger = logger
        else:
            self.logger = get_logger(conf, log_route='ratelimit')
        self.account_ratelimit = float(conf.get('account_ratelimit', 0))
        self.max_sleep_time_seconds = \
            float(conf.get('max_sleep_time_seconds', 60))
        self.log_sleep_time_seconds = \
            float(conf.get('log_sleep_time_seconds', 0))
        # Sub-second resolution for the memcache-based clock (1000 => ms).
        self.clock_accuracy = int(conf.get('clock_accuracy', 1000))
        self.rate_buffer_seconds = int(conf.get('rate_buffer_seconds', 5))
        self.ratelimit_whitelist = \
            [acc.strip() for acc in
                conf.get('account_whitelist', '').split(',') if acc.strip()]
        self.ratelimit_blacklist = \
            [acc.strip() for acc in
                conf.get('account_blacklist', '').split(',') if acc.strip()]
        # Filled lazily from the WSGI environment on the first request.
        self.memcache_client = None
        # Collect (container_size, rate) pairs from container_ratelimit_N keys.
        conf_limits = []
        for conf_key in conf.keys():
            if conf_key.startswith('container_ratelimit_'):
                cont_size = int(conf_key[len('container_ratelimit_'):])
                rate = float(conf[conf_key])
                conf_limits.append((cont_size, rate))
        conf_limits.sort()
        # Build piecewise-linear interpolation functions between the
        # configured (size, rate) points; the last segment is constant.
        self.container_ratelimits = []
        while conf_limits:
            cur_size, cur_rate = conf_limits.pop(0)
            if conf_limits:
                next_size, next_rate = conf_limits[0]
                slope = (float(next_rate) - float(cur_rate)) \
                    / (next_size - cur_size)

                def new_scope(cur_size, slope, cur_rate):
                    # making new scope for variables
                    return lambda x: (x - cur_size) * slope + cur_rate
                line_func = new_scope(cur_size, slope, cur_rate)
            else:
                # Bind cur_rate as a default argument so the lambda does not
                # depend on late-binding closure semantics.
                line_func = lambda x, rate=cur_rate: rate
            self.container_ratelimits.append((cur_size, cur_rate, line_func))

    def get_container_maxrate(self, container_size):
        """
        Returns number of requests allowed per second for given container size.
        """
        last_func = None
        if container_size:
            container_size = int(container_size)
            for size, rate, func in self.container_ratelimits:
                if container_size < size:
                    break
                last_func = func
            if last_func:
                return last_func(container_size)
        return None

    def get_ratelimitable_key_tuples(self, req_method, account_name,
                                     container_name=None, obj_name=None):
        """
        Returns a list of key (used in memcache), ratelimit tuples. Keys
        should be checked in order.

        :param req_method: HTTP method
        :param account_name: account name from path
        :param container_name: container name from path
        :param obj_name: object name from path
        """
        keys = []
        # COPYs are not limited
        # Account-level limit applies to container PUT/DELETE.
        if self.account_ratelimit and \
                account_name and container_name and not obj_name and \
                req_method in ('PUT', 'DELETE'):
            keys.append(("ratelimit/%s" % account_name,
                         self.account_ratelimit))
        # Container-level limit applies to object PUT/DELETE/POST and
        # scales with the container's cached object count.
        if account_name and container_name and obj_name and \
                req_method in ('PUT', 'DELETE', 'POST'):
            container_size = None
            memcache_key = get_container_memcache_key(account_name,
                                                      container_name)
            container_info = self.memcache_client.get(memcache_key)
            if isinstance(container_info, dict):
                container_size = container_info.get(
                    'object_count', container_info.get('container_size', 0))
                container_rate = self.get_container_maxrate(container_size)
                if container_rate:
                    keys.append(("ratelimit/%s/%s" % (account_name,
                                                      container_name),
                                 container_rate))
        return keys

    def _get_sleep_time(self, key, max_rate):
        '''
        Returns the amount of time (a float in seconds) that the app
        should sleep.

        :param key: a memcache key
        :param max_rate: maximum rate allowed in requests per second
        :raises: MaxSleepTimeHitError if max sleep time is exceeded.
        '''
        try:
            now_m = int(round(time.time() * self.clock_accuracy))
            time_per_request_m = int(round(self.clock_accuracy / max_rate))
            # incr atomically advances this key's "virtual clock".
            running_time_m = self.memcache_client.incr(
                key, delta=time_per_request_m)
            need_to_sleep_m = 0
            if (now_m - running_time_m >
                    self.rate_buffer_seconds * self.clock_accuracy):
                # Virtual clock is stale; reset it just past "now".
                next_avail_time = int(now_m + time_per_request_m)
                self.memcache_client.set(key, str(next_avail_time),
                                         serialize=False)
            else:
                need_to_sleep_m = \
                    max(running_time_m - now_m - time_per_request_m, 0)

            max_sleep_m = self.max_sleep_time_seconds * self.clock_accuracy
            if max_sleep_m - need_to_sleep_m <= self.clock_accuracy * 0.01:
                # treat as no-op decrement time
                self.memcache_client.decr(key, delta=time_per_request_m)
                raise MaxSleepTimeHitError(
                    "Max Sleep Time Exceeded: %.2f" %
                    (float(need_to_sleep_m) / self.clock_accuracy))

            return float(need_to_sleep_m) / self.clock_accuracy
        except MemcacheConnectionError:
            # Best-effort: without memcache we cannot limit, so do not sleep.
            return 0

    def handle_ratelimit(self, req, account_name, container_name, obj_name):
        '''
        Performs rate limiting and account white/black listing. Sleeps
        if necessary. If self.memcache_client is not set, immediately returns
        None.

        :param account_name: account name from path
        :param container_name: container name from path
        :param obj_name: object name from path
        '''
        if not self.memcache_client:
            return None
        if account_name in self.ratelimit_blacklist:
            self.logger.error(_('Returning 497 because of blacklisting: %s'),
                              account_name)
            eventlet.sleep(self.BLACK_LIST_SLEEP)
            return Response(status='497 Blacklisted',
                            body='Your account has been blacklisted',
                            request=req)
        if account_name in self.ratelimit_whitelist:
            return None
        for key, max_rate in self.get_ratelimitable_key_tuples(
                req.method, account_name, container_name=container_name,
                obj_name=obj_name):
            try:
                need_to_sleep = self._get_sleep_time(key, max_rate)
                if self.log_sleep_time_seconds and \
                        need_to_sleep > self.log_sleep_time_seconds:
                    self.logger.warning(
                        _("Ratelimit sleep log: %(sleep)s for "
                          "%(account)s/%(container)s/%(object)s"),
                        {'sleep': need_to_sleep, 'account': account_name,
                         'container': container_name, 'object': obj_name})
                if need_to_sleep > 0:
                    eventlet.sleep(need_to_sleep)
            # Bug fix: `except MaxSleepTimeHitError, e:` is Python-2-only
            # syntax; `as` is valid on Python 2.6+ and required on Python 3.
            except MaxSleepTimeHitError as e:
                self.logger.error(
                    _('Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s '
                      '. Ratelimit (Max Sleep) %(e)s'),
                    {'meth': req.method, 'acc': account_name,
                     'cont': container_name, 'obj': obj_name, 'e': str(e)})
                error_resp = Response(status='498 Rate Limited',
                                      body='Slow down', request=req)
                return error_resp
        return None

    def __call__(self, env, start_response):
        """
        WSGI entry point.
        Wraps env in swob.Request object and passes it down.

        :param env: WSGI environment dictionary
        :param start_response: WSGI callable
        """
        req = Request(env)
        if self.memcache_client is None:
            self.memcache_client = cache_from_env(env)
        if not self.memcache_client:
            self.logger.warning(
                _('Warning: Cannot ratelimit without a memcached client'))
            return self.app(env, start_response)
        try:
            version, account, container, obj = req.split_path(1, 4, True)
        except ValueError:
            # Malformed path: pass through untouched.
            return self.app(env, start_response)
        ratelimit_resp = self.handle_ratelimit(req, account, container, obj)
        if ratelimit_resp is None:
            return self.app(env, start_response)
        else:
            return ratelimit_resp(env, start_response)
def filter_factory(global_conf, **local_conf):
    """
    paste.deploy app factory for creating WSGI proxy apps.
    """
    # Local (filter-section) options override the global ones.
    conf = dict(global_conf)
    conf.update(local_conf)

    def limit_filter(app):
        # Wrap the next app in the pipeline with the ratelimit middleware.
        return RateLimitMiddleware(app, conf)

    return limit_filter
|
|
# -----------------------------------------------------------------------
# Standalone and testing code
from ctypes import *
# Bug fix: sys.exit() below was used without importing sys (NameError on
# the non-IDA path).
import sys

try:
    import _idaapi
except ImportError:
    # Outside IDA there is no _idaapi module; bail out gracefully.
    # (Narrowed from a bare `except:` which would also swallow
    # KeyboardInterrupt/SystemExit.)
    print("Please try me from inside IDA")
    sys.exit(0)

try:
    import pywraps
    pywraps_there = True
    print("Choose2: using pywraps")
    # Route the choose2 entry points through the pywraps implementations.
    _idaapi.choose2_create = pywraps.py_choose2_create
    _idaapi.choose2_activate = pywraps.py_choose2_activate
    _idaapi.choose2_refresh = pywraps.py_choose2_refresh
    _idaapi.choose2_close = pywraps.py_choose2_close
    _idaapi.choose2_add_command = pywraps.py_choose2_add_command
    _idaapi.choose2_get_embedded = pywraps.py_choose2_get_embedded
    _idaapi.choose2_get_embedded_selection = pywraps.py_choose2_get_embedded_selection
    try:
        # Get function address
        # void test_embedded(chooser_info_t *)
        TEST_EMBEDDED = CFUNCTYPE(c_void_p, c_void_p)
        test_embedded = TEST_EMBEDDED(pywraps.py_choose2_get_test_embedded())
    except Exception as e:
        test_embedded = None
        print("Choose2: Exception: %s" % str(e))
except Exception as e:
    pywraps_there = False
    print("Choose2: Not using pywraps: %s" % str(e))
# -----------------------------------------------------------------------
#<pycode(py_choose2)>
class Choose2(object):
    """
    Choose2 wrapper class.

    Some constants are defined in this class. Please refer to kernwin.hpp for more information.
    """
    CH_MODAL = 0x01
    """Modal chooser"""
    CH_MULTI = 0x02
    """Allow multi selection"""
    CH_MULTI_EDIT = 0x04
    # Multi-selection editing; exact semantics in kernwin.hpp -- TODO confirm
    CH_NOBTNS = 0x08
    # Chooser without buttons; see kernwin.hpp -- TODO confirm
    CH_ATTRS = 0x10
    # Enables per-line attributes (cf. OnGetLineAttr template below) -- TODO confirm
    CH_NOIDB = 0x20
    """use the chooser even without an open database, same as x0=-2"""
    CH_UTF8 = 0x40
    """string encoding is utf-8"""
    CH_BUILTIN_MASK = 0xF80000
    # Bits reserved for built-in choosers (kernwin.hpp).
    # column flags (are specified in the widths array)
    CHCOL_PLAIN = 0x00000000
    CHCOL_PATH = 0x00010000
    CHCOL_HEX = 0x00020000
    CHCOL_DEC = 0x00030000
    CHCOL_FORMAT = 0x00070000
    def __init__(self, title, cols, flags=0, popup_names=None,
                 icon=-1, x1=-1, y1=-1, x2=-1, y2=-1, deflt=-1,
                 embedded=False, width=None, height=None):
        """
        Constructs a chooser window.
        @param title: The chooser title
        @param cols: a list of colums; each list item is a list of two items
            example: [ ["Address", 10 | Choose2.CHCOL_HEX], ["Name", 30 | Choose2.CHCOL_PLAIN] ]
        @param flags: One of CH_XXXX constants
        @param deflt: Default starting item
        @param popup_names: list of new captions to replace this list ["Insert", "Delete", "Edit", "Refresh"]
        @param icon: Icon index (the icon should exist in ida resources or an index to a custom loaded icon)
        @param x1, y1, x2, y2: The default location
        @param embedded: Create as embedded chooser
        @param width: Embedded chooser width
        @param height: Embedded chooser height
        """
        self.title = title
        self.flags = flags
        self.cols = cols
        self.deflt = deflt
        self.popup_names = popup_names
        self.icon = icon
        self.x1 = x1
        self.y1 = y1
        self.x2 = x2
        self.y2 = y2
        self.embedded = embedded
        if embedded:
            # Embedded choosers reuse the x1/y1 slots to carry width/height.
            self.x1 = width
            self.y1 = height
    def Embedded(self):
        """
        Creates an embedded chooser (as opposed to Show())
        @return: Returns 1 on success
        """
        return _idaapi.choose2_create(self, True)
    def GetEmbSelection(self):
        """
        Returns the selection associated with an embedded chooser
        @return:
            - None if chooser is not embedded
            - A list with selection indices (0-based)
        """
        return _idaapi.choose2_get_embedded_selection(self)
    def Show(self, modal=False):
        """
        Activates or creates a chooser window
        @param modal: Display as modal dialog
        @return: For modal choosers it will return the selected item index (0-based)
        """
        if modal:
            self.flags |= Choose2.CH_MODAL

            # Disable the timeout
            # (the script timeout would otherwise interrupt a blocking modal dialog)
            old = _idaapi.set_script_timeout(0)
            n = _idaapi.choose2_create(self, False)
            _idaapi.set_script_timeout(old)

            # Delete the modal chooser instance
            self.Close()

            return n
        else:
            self.flags &= ~Choose2.CH_MODAL
            return _idaapi.choose2_create(self, False)
    def Activate(self):
        """Activates a visible chooser"""
        return _idaapi.choose2_activate(self)
    def Refresh(self):
        """Causes the refresh callback to trigger"""
        return _idaapi.choose2_refresh(self)
    def Close(self):
        """Closes the chooser"""
        return _idaapi.choose2_close(self)
    def AddCommand(self,
                   caption,
                   flags = _idaapi.CHOOSER_POPUP_MENU,
                   menu_index = -1,
                   icon = -1,
                   emb=None):
        """
        Deprecated: Use
        - register_action()
        - attach_action_to_menu()
        - attach_action_to_popup()
        """
        # Use the 'emb' as a sentinel. It will be passed the correct value from the EmbeddedChooserControl
        if self.embedded and ((emb is None) or (emb != 2002)):
            raise RuntimeError("Please add a command through EmbeddedChooserControl.AddCommand()")
        return _idaapi.choose2_add_command(self, caption, flags, menu_index, icon)

    #
    # Implement these methods in the subclass:
    #
    #<pydoc>
    #    def OnClose(self):
    #        """
    #        Called when the window is being closed.
    #        This callback is mandatory.
    #        @return: nothing
    #        """
    #        pass
    #
    #    def OnGetLine(self, n):
    #        """Called when the chooser window requires lines.
    #        This callback is mandatory.
    #        @param n: Line number (0-based)
    #        @return: The user should return a list with ncols elements.
    #            example: a list [col1, col2, col3, ...] describing the n-th line
    #        """
    #        return ["col1 val", "col2 val"]
    #
    #    def OnGetSize(self):
    #        """Returns the element count.
    #        This callback is mandatory.
    #        @return: Number of elements
    #        """
    #        return len(self.the_list)
    #
    #    def OnEditLine(self, n):
    #        """
    #        Called when an item is being edited.
    #        @param n: Line number (0-based)
    #        @return: Nothing
    #        """
    #        pass
    #
    #    def OnInsertLine(self):
    #        """
    #        Called when 'Insert' is selected either via the hotkey or popup menu.
    #        @return: Nothing
    #        """
    #        pass
    #
    #    def OnSelectLine(self, n):
    #        """
    #        Called when a line is selected and then Ok or double click was pressed
    #        @param n: Line number (0-based)
    #        """
    #        pass
    #
    #    def OnSelectionChange(self, sel_list):
    #        """
    #        Called when the selection changes
    #        @param sel_list: A list of selected item indices
    #        """
    #        pass
    #
    #    def OnDeleteLine(self, n):
    #        """
    #        Called when a line is about to be deleted
    #        @param n: Line number (0-based)
    #        """
    #        return self.n
    #
    #    def OnRefresh(self, n):
    #        """
    #        Triggered when the 'Refresh' is called from the popup menu item.
    #
    #        @param n: The currently selected line (0-based) at the time of the refresh call
    #        @return: Return the number of elements
    #        """
    #        return self.n
    #
    #    def OnRefreshed(self):
    #        """
    #        Triggered when a refresh happens (for example due to column sorting)
    #        @param n: Line number (0-based)
    #        @return: Return the number of elements
    #        """
    #        return self.n
    #
    #    def OnCommand(self, n, cmd_id):
    #        """Return int ; check add_chooser_command()"""
    #        return 0
    #
    #    def OnGetIcon(self, n):
    #        """
    #        Return icon number for a given item (or -1 if no icon is avail)
    #        @param n: Line number (0-based)
    #        """
    #        return -1
    #
    #    def OnGetLineAttr(self, n):
    #        """
    #        Return list [bgcolor, flags=CHITEM_XXXX] or None; check chooser_item_attrs_t
    #        @param n: Line number (0-based)
    #        """
    #        return [0x0, CHITEM_BOLD]
    #</pydoc>
    #</pycode(py_choose2)>
|
|
"""
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
    """Load csv file.

    Parameters
    ----------
    F : file object
        CSV file open in byte mode.

    Returns
    -------
    rec : np.ndarray
        record array representing the data
    """
    header = F.readline()
    if not PY2:
        # Numpy recarray wants Python 3 str but not bytes...
        header = header.decode('ascii')
    # (On Python 2 the raw str header is used as-is.)
    column_names = header.strip().split(',')
    rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
    rec.dtype.names = column_names
    return rec
def construct_grids(batch):
    """Construct the map grid from the batch object

    Parameters
    ----------
    batch : Batch object
        The object returned by :func:`fetch_species_distributions`

    Returns
    -------
    (xgrid, ygrid) : 1-D arrays
        The grid corresponding to the values in batch.coverages
    """
    cell = batch.grid_size
    # Coordinates of the corner cells (one cell in from the lower-left).
    x_min = batch.x_left_lower_corner + cell
    y_min = batch.y_left_lower_corner + cell
    # One coordinate per grid column / row.
    xgrid = np.arange(x_min, x_min + (batch.Nx * cell), cell)
    ygrid = np.arange(y_min, y_min + (batch.Ny * cell), cell)
    return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
                                download_if_missing=True):
    """Loader for species distribution dataset from Phillips et. al. (2006)

    Parameters
    ----------
    data_home : optional, default: None
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.
    download_if_missing: optional, True by default
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Notes
    ------
    This dataset represents the geographic distribution of species.
    The dataset is provided by Phillips et. al. (2006).
    The two species are:
    - `"Bradypus variegatus"
      <http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
      the Brown-throated Sloth.
    - `"Microryzomys minutus"
      <http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
      also known as the Forest Small Rice Rat, a rodent that lives in Peru,
      Colombia, Ecuador, Peru, and Venezuela.
    The data is returned as a Bunch object with the following attributes:
    coverages : array, shape = [14, 1592, 1212]
        These represent the 14 features measured at each point of the map grid.
        The latitude/longitude values for the grid are discussed below.
        Missing data is represented by the value -9999.
    train : record array, shape = (1623,)
        The training points for the data. Each point has three fields:
        - train['species'] is the species name
        - train['dd long'] is the longitude, in degrees
        - train['dd lat'] is the latitude, in degrees
    test : record array, shape = (619,)
        The test points for the data. Same format as the training data.
    Nx, Ny : integers
        The number of longitudes (x) and latitudes (y) in the grid
    x_left_lower_corner, y_left_lower_corner : floats
        The (x,y) position of the lower-left corner, in degrees
    grid_size : float
        The spacing between points of the grid, in degrees

    References
    ----------
    * `"Maximum entropy modeling of species geographic distributions"
      <http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
      S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
      190:231-259, 2006.

    Notes
    -----
    * See examples/applications/plot_species_distribution_modeling.py
      for an example of using this dataset with scikit-learn
    """
    data_home = get_data_home(data_home)
    if not exists(data_home):
        makedirs(data_home)
    # Define parameters for the data files. These should not be changed
    # unless the data model changes. They will be saved in the npz file
    # with the downloaded data.
    extra_params = dict(x_left_lower_corner=-94.8,
                        Nx=1212,
                        y_left_lower_corner=-56.05,
                        Ny=1592,
                        grid_size=0.05)
    dtype = np.int16  # coverage grids are stored as 16-bit integers

    # Download and parse only when no cached archive exists.
    # NOTE(review): `download_if_missing` is documented above but never
    # consulted here -- the download happens unconditionally when the
    # archive is absent. TODO confirm against the upstream implementation.
    if not exists(join(data_home, DATA_ARCHIVE_NAME)):
        print('Downloading species data from %s to %s' % (SAMPLES_URL,
                                                          data_home))
        X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
        # NOTE(review): `train`/`test` stay unbound if the zip lacks members
        # named '*train*'/'*test*'; assumed present in the upstream archive.
        for f in X.files:
            fhandle = BytesIO(X[f])
            if 'train' in f:
                train = _load_csv(fhandle)
            if 'test' in f:
                test = _load_csv(fhandle)
        print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
                                                           data_home))
        X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
        coverages = []
        for f in X.files:
            fhandle = BytesIO(X[f])
            print(' - converting', f)
            coverages.append(_load_coverage(fhandle))
        coverages = np.asarray(coverages, dtype=dtype)
        bunch = Bunch(coverages=coverages,
                      test=test,
                      train=train,
                      **extra_params)
        # Cache the parsed dataset; compress=9 trades CPU for disk space.
        joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
    else:
        bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
    return bunch
|
|
#!/usr/bin/env python
""" Copy mongo data to psql by using two strategies:
1. Do initial load - copy data using trunk&load process, which rewriting
destination data every time.
2. If mongodb oplog - 'operational log' is enabled - patch psql data by oplog
operations, so it's should not overwrite dest data. If initial load is complete
but sync point is not yet located then synchronization process will be started.
The sync point - 'oplog timestamp' is the result of syncronization. That means
all data from oplog can be applied to psql data starting from that timestamp.
If sync is failed or data verification is failed at patch applying it's will
start initial load again. Every application session will log status data into
psql table 'qmetlstatus' in public schema."""
__author__ = "Yaroslav Litvinov"
__copyright__ = "Copyright 2016, Rackspace Inc."
__email__ = "yaroslav.litvinov@rackspace.com"
import os
import sys
import argparse
import configparser
import datetime
import logging
from os import system
from logging import getLogger
from collections import namedtuple
from mongo_reader.reader import MongoReader
from mongo_reader.reader import mongo_reader_from_settings
from gizer.all_schema_engines import get_schema_engines_as_dict
from gizer.etlstatus_table import STATUS_INITIAL_LOAD
from gizer.etlstatus_table import STATUS_OPLOG_SYNC
from gizer.etlstatus_table import STATUS_OPLOG_APPLY
from gizer.etlstatus_table import STATUS_OPLOG_RESYNC
from gizer.etlstatus_table import PsqlEtlStatusTable
from gizer.etlstatus_table import PsqlEtlStatusTableManager
from gizer.oplog_sync_alligned_data import OplogSyncAllignedData
from gizer.oplog_sync_unalligned_data import OplogSyncUnallignedData
from gizer.psql_requests import PsqlRequests
from gizer.psql_requests import psql_conn_from_settings
from gizer.opconfig import psql_settings_from_config
from gizer.opconfig import mongo_settings_from_config
from gizer.opconfig import load_mongo_replicas_from_setting
from gizer.log import save_loglevel
def sectkey(section_name, base_key_name):
    """ Return key config value. Key name in file must be concatenation
    of both params divided by hyphen.

    Bug fix: the original used '%s-%s'.format(...), which mixes %-style
    placeholders with str.format and always returned the literal '%s-%s'. """
    return "%s-%s" % (section_name, base_key_name)
def getargs():
    """ get args from cmdline """
    default_request = '{}'
    parser = argparse.ArgumentParser()
    # Bug fix: args.js_request was read below but the option was never
    # declared, so parse_args() produced a namespace without that attribute
    # and the access raised AttributeError.
    parser.add_argument("--js-request", action="store", default=None,
                        help="JSON request filter; defaults to an empty one")
    args = parser.parse_args()
    if args.js_request is None:
        args.js_request = default_request
    return args
def create_logger(logspath, name):
    """Configure file logging into *logspath* for the current date.

    The log file is named '<YYYY-MM-DD>-<name>.log'. Also persists the log
    level via save_loglevel() and emits an initial 'Created' record.
    """
    stamp = datetime.datetime.now().strftime('%Y-%m-%d')
    logfilename = os.path.join(
        logspath, '{date}-{name}.log'.format(name=name, date=stamp))
    logging.basicConfig(filename=logfilename,
                        level=logging.INFO,
                        format='%(asctime)s %(levelname)-8s %(message)s')
    save_loglevel()
    getLogger(__name__).info('Created')
def reinit_conn(config_settings, psql, status_manager):
    """Re-establish the psql connection used by *status_manager*.

    Long running sync/apply phases can outlive the connection, so a fresh
    one is created and handed back to the status table.
    """
    fresh_conn = psql_conn_from_settings(config_settings)
    psql.reinit(fresh_conn)
    status_manager.status_table.replace_conn(psql)
def main():
    """Run one ETL step: oplog sync or oplog apply, depending on state.

    Reads the config file named on the command line, inspects the most
    recent etl status row, and either:

    * performs an unaligned oplog sync after a completed initial load /
      resync, or
    * applies oplog patches after a completed sync/apply step,

    recording start/finish status in the etl status table.

    :returns: 0 on success, -1 on error or when no work can be started
    """
    parser = argparse.ArgumentParser()
    # Portability fix: the py2-only builtin ``file`` was used as the type;
    # argparse.FileType('r') opens the file the same way on py2 and py3.
    parser.add_argument("--config-file", action="store",
                        help="Config file with settings",
                        type=argparse.FileType('r'), required=True)
    args = parser.parse_args()
    config = configparser.ConfigParser()
    config.read_file(args.config_file)
    schemas_path = config['misc']['schemas-dir']
    logspath = config['misc']['logs-dir']
    oplog_settings = load_mongo_replicas_from_setting(config, 'mongo-oplog')
    mongo_settings = mongo_settings_from_config(config, 'mongo')
    psql_settings = psql_settings_from_config(config, 'psql')
    # one reader per schema-described collection
    mongo_readers = {}
    schema_engines = get_schema_engines_as_dict(schemas_path)
    for collection_name in schema_engines:
        reader = mongo_reader_from_settings(mongo_settings, collection_name, {})
        mongo_readers[collection_name] = reader
        mongo_readers[collection_name].set_name(collection_name)
    # create oplog read transport/s
    oplog_readers = {}
    for oplog_name, settings_list in oplog_settings.items():
        # settings list is a replica set (must be at least one in list)
        oplog_readers[oplog_name] = \
            mongo_reader_from_settings(settings_list, 'oplog.rs', {})
        oplog_readers[oplog_name].set_name(oplog_name)
    psql_qmetl = PsqlRequests(psql_conn_from_settings(psql_settings))
    psql_main = PsqlRequests(psql_conn_from_settings(psql_settings))
    status_table = PsqlEtlStatusTable(psql_qmetl.cursor,
                                      config['psql']['psql-schema-name'],
                                      sorted(oplog_settings.keys()))
    status_manager = PsqlEtlStatusTableManager(status_table)
    psql_schema = config['psql']['psql-schema-name']
    res = 0
    status = status_table.get_recent()
    if status:
        if (status.status == STATUS_INITIAL_LOAD
                or status.status == STATUS_OPLOG_RESYNC) \
                and status.time_end and not status.error:
            create_logger(logspath, 'oplogsync')
            psql_sync = psql_main
            # initial load done, save oplog sync status and do oplog sync.
            status_manager.oplog_sync_start(status.ts)
            unalligned_sync = OplogSyncUnallignedData(
                psql_qmetl, psql_sync, mongo_readers, oplog_readers,
                schemas_path, schema_engines, psql_schema)
            try:
                ts = unalligned_sync.sync(status.ts)
                stat = unalligned_sync.statistic()
                reinit_conn(psql_settings, psql_qmetl, status_manager)
                if ts:  # sync ok
                    status_manager.oplog_sync_finish(stat[0], stat[1], ts, False)
                    res = 0
                else:  # error
                    status_manager.oplog_sync_finish(stat[0], stat[1], None, True)
                    res = -1
            except Exception as e:
                getLogger(__name__).error(e, exc_info=True)
                getLogger(__name__).error('ROLLBACK CLOSE')
                psql_sync.conn.rollback()
                reinit_conn(psql_settings, psql_qmetl, status_manager)
                # Bug fix: oplog_sync_finish takes four arguments
                # (queries, records, ts, error) like every other call site;
                # the original passed only (None, True) here.
                status_manager.oplog_sync_finish(None, None, None, True)
                res = -1
        elif (status.status == STATUS_OPLOG_SYNC or
              status.status == STATUS_OPLOG_APPLY) \
                and status.time_end and not status.error:
            create_logger(logspath, 'oploguse')
            # sync done, now apply oplog pacthes to main psql
            # save oplog sync status
            getLogger(__name__).\
                info('Sync point is ts:{ts}'.format(ts=status.ts))
            status_manager.oplog_use_start(status.ts)
            alligned_sync = \
                OplogSyncAllignedData(psql_main, mongo_readers, oplog_readers,
                                      schemas_path, schema_engines, psql_schema)
            try:
                ts_res = alligned_sync.sync(status.ts)
                stat = alligned_sync.statistic()
                reinit_conn(psql_settings, psql_qmetl, status_manager)
                if ts_res == 'resync':
                    # some records recovered must do resync at next step
                    status_manager.oplog_resync_finish(stat[0], stat[1],
                                                       status.ts, False)
                    res = 0
                elif ts_res:  # oplog apply ok
                    status_manager.oplog_use_finish(stat[0], stat[1],
                                                    ts_res, False)
                    res = 0
                else:  # error
                    status_manager.oplog_use_finish(stat[0], stat[1], None, True)
                    res = -1
            except Exception as e:
                getLogger(__name__).error(e, exc_info=True)
                getLogger(__name__).error('ROLLBACK CLOSE')
                psql_main.conn.rollback()
                reinit_conn(psql_settings, psql_qmetl, status_manager)
                status_manager.oplog_use_finish(None, None, None, True)
                res = -1
        else:
            # initial load is not performed
            # or not time_end for any other state, or error, do exit
            res = -1
    else:
        # initial load is not performed
        res = -1
    getLogger(__name__).info('exiting with code %d' % res)
    return res
if __name__ == "__main__":
    # Propagate the ETL result (0 on success, -1 on error) as the process
    # exit status; previously main()'s return value was silently discarded
    # even though it is logged as the exit code.
    sys.exit(main())
|
|
import clingo
import yaml
import threading
import os
class WrappedPropagateControl(object):
    """Recording proxy for a clingo PropagateControl / PropagateInit.

    Each intercepted method appends a ``{"name", "args"[, "ret"]}`` dict to
    the shared ``calls`` list before delegating to the wrapped object.
    Any attribute not defined here is forwarded untouched.
    """

    def __init__(self, control, calls):
        self.control = control
        self.calls = calls

    def _record(self, name, args):
        # Append a trace entry and return it so callers may attach "ret".
        entry = {}
        entry["name"] = name
        entry["args"] = args
        self.calls.append(entry)
        return entry

    def add_literal(self):
        self._record("add_literal", [])
        return self.control.add_literal()

    def add_clause(self, lits, tag=False, lock=False):
        lits = list(lits)
        entry = self._record("add_clause", [lits[:], tag, lock])
        if isinstance(self.control, clingo.PropagateInit):
            # PropagateInit.add_clause does not take tag/lock arguments.
            result = self.control.add_clause(lits)
        else:
            result = self.control.add_clause(lits, tag, lock)
        entry["ret"] = result
        return result

    def add_watch(self, lit):
        self._record("add_watch", [lit])
        return self.control.add_watch(lit)

    def propagate(self):
        entry = self._record("propagate", [])
        result = self.control.propagate()
        entry["ret"] = result
        return result

    def __getattribute__(self, name):
        # Resolve names defined on this wrapper (class or instance) normally;
        # everything else is delegated to the wrapped control object.
        cls_dict = object.__getattribute__(WrappedPropagateControl, "__dict__")
        obj_dict = object.__getattribute__(self, "__dict__")
        if name in cls_dict or name in obj_dict:
            return object.__getattribute__(self, name)
        return getattr(self.control, name)
class WrappedBackend:
    """Proxy around a clingo backend that logs every call to ``ctl.script``.

    The written lines form a replayable Python script (see WrappedControl),
    so the exact backend interaction can be reproduced later.
    """

    def __init__(self, ctl):
        self.script = ctl.script
        self.backend = ctl.ctl.backend()

    def __enter__(self, *args, **kwargs):
        self.script.write("with ctl.backend() as b:\n")
        self.backend.__enter__(*args, **kwargs)
        return self

    def add_atom(self, symbol=None):
        self.script.write(" b.add_atom(clingo.parse_term({}))".format(repr(str(symbol))))
        self.script.flush()
        ret = self.backend.add_atom(symbol)
        # Record the returned atom id as a trailing comment.
        self.script.write(" # {}\n".format(ret))
        self.script.flush()
        return ret

    def add_rule(self, head, body=None, choice=False):
        # Bug fix: the original used a mutable default argument (body=[]),
        # which is shared across calls; normalize None to a fresh list.
        if body is None:
            body = []
        self.script.write(" b.add_rule({}, {}, {})\n".format(head, body, choice))
        self.script.flush()
        return self.backend.add_rule(head, body, choice)

    def __exit__(self, *args, **kwargs):
        return self.backend.__exit__(*args, **kwargs)
class WrappedControl:
    """clingo.Control wrapper that records a replay script and a call trace.

    Every API call is mirrored into ``replay.py`` (plus numbered copies of
    loaded program files) so a solving session can be reproduced, and
    propagator calls recorded through wrap_init/wrap_control are dumped via
    write_trace.
    """

    def __init__(self, args=None):
        # Bug fix: the original used a mutable default argument (args=[]).
        if args is None:
            args = []
        self.script = open("replay.py", "w")
        self.prefix = "file_"
        self.files = 0
        self.ctl = clingo.Control(args)
        self.script.write("import clingo\n")
        self.script.write("ctl = clingo.Control({})\n".format(repr(args)))
        self.script.flush()
        self.g_trace = []

    def backend(self):
        """Return a logging proxy over the underlying backend."""
        return WrappedBackend(self)

    def load(self, path):
        """Snapshot *path* to a numbered copy and load that copy."""
        self.files += 1
        name = "{}{}.lp".format(self.prefix, self.files)
        # Bug fix: close both file handles deterministically; the original
        # open(...).write(open(...).read()) leaked the handles.
        with open(path) as src:
            contents = src.read()
        with open(name, "w") as dst:
            dst.write(contents)
        self.script.write("ctl.load({})\n".format(repr(name)))
        self.script.flush()
        self.ctl.load(name)

    def ground(self, parts, context=None):
        self.script.write("ctl.ground({})\n".format(repr(parts)))
        self.script.flush()
        self.ctl.ground(parts, context)

    def solve(self, assumptions=None, on_model=None, on_finish=None,
              yield_=False, async_=False):
        # Bug fix: the original used a mutable default (assumptions=[]).
        if assumptions is None:
            assumptions = []
        self.script.write("ctl.solve({}).get()\n".format(repr(assumptions)))
        self.script.flush()
        return self.ctl.solve(assumptions, on_model, on_finish, yield_, async_)

    def wrap_init(self, init):
        """Start a trace entry for propagator init and wrap *init*."""
        calls = []
        trace = {}
        trace["state"] = "init"
        trace["thread_id"] = 0
        trace["calls"] = calls
        self.g_trace.append(trace)
        return WrappedPropagateControl(init, calls)

    def wrap_control(self, control, where):
        """Start a trace entry for state *where* and wrap *control*."""
        calls = []
        trace = {}
        trace["state"] = where
        trace["thread_id"] = control.thread_id
        trace["calls"] = calls
        self.g_trace.append(trace)
        return WrappedPropagateControl(control, calls)

    def write_trace(self):
        """Dump the recorded trace; keep trace.yml at the smallest repro."""
        txt = yaml.dump(self.g_trace, indent=2)
        with open("new-trace.yml", "w") as f:
            f.write(txt)
        try:
            a = os.path.getsize("trace.yml")
            b = os.path.getsize("new-trace.yml")
            if b < a:
                with open("trace.yml", "w") as f:
                    f.write(txt)
        except OSError:
            # Bug fix: catch only the expected "trace.yml missing" error;
            # the original bare except hid every failure.
            with open("trace.yml", "w") as f:
                f.write(txt)

    @property
    def statistics(self):
        return self.ctl.statistics

    @property
    def _to_c(self):
        return self.ctl._to_c
class Retracer:
    """Propagator that replays a previously recorded propagation trace.

    The trace is read from ``trace.yml`` — a list of entries with keys
    ``state``, ``thread_id`` and ``calls`` (as written by
    ``WrappedControl.write_trace``). Recorded calls are re-issued against
    the solver in the original order across solver threads, serialized
    with a condition variable.
    """
    def __init__(self):
        # Trace entries are consumed front-to-back by run_trace.
        self.trace = yaml.load(open("trace.yml").read(), Loader=yaml.FullLoader)
        self.cv = threading.Condition()
    def init(self, init):
        init.check_mode = clingo.PropagatorCheckMode.Fixpoint
        self.run_trace(init, 0, "init")
    '''
    for trace in self.trace:
        print('trace.emplace_back("{}", {}, CallVec{{}});'.format(trace["state"], trace["thread_id"]))
        for call in trace["calls"]:
            if call["args"] and isinstance(call["args"][0], list):
                args = map(str, call["args"][0])
            else:
                args = map(str, call["args"])
            print('std::get<2>(trace.back()).emplace_back("{}",LitVec{{{}}});'.format(call["name"], ",".join(args)))
    '''
    def match(self, thread_id, where):
        # True when the next pending trace entry belongs to this
        # thread/state pair (or the trace is exhausted).
        if not self.trace:
            return True
        top = self.trace[0]
        return top["state"] == where and top["thread_id"] == thread_id
    def run_trace(self, control, thread_id, where):
        """Block until it is this thread's turn, then replay one entry.

        Re-issues every recorded call of the entry against *control* and
        wakes all waiting threads so the next entry can be consumed.
        """
        self.cv.acquire()
        print("WAIT: ", thread_id, where)
        while not self.match(thread_id, where):
            self.cv.wait()
        print("  START: ", thread_id, where)
        if self.trace:
            top = self.trace[0]
            for call in top["calls"]:
                if call["name"] == "propagate" or call["name"] == "add_clause":
                    print("    CALL", "{}({})".format(call["name"], ",".join(map(str, call["args"]))), "EXPECTING", call["ret"])
                    ret = getattr(control, call["name"])(*call["args"])
                    print("    RESULT", ret)
                else:
                    print("    CALL", call["name"])
                    getattr(control, call["name"])(*call["args"])
            self.trace.pop(0)
        if self.trace:
            print("  NEXT: ", self.trace[0]["state"], self.trace[0]["thread_id"])
        self.cv.notify_all()
        self.cv.release()
    def propagate(self, control, changes):
        self.run_trace(control, control.thread_id, "propagate")
    def check(self, control):
        self.run_trace(control, control.thread_id, "check")
if __name__ == "__main__":
    # Replay a recorded trace against a 3-thread solver run; the Retracer
    # propagator re-issues the recorded calls read from trace.yml.
    ctl = clingo.Control(["-t3", "0"])
    ctl.register_propagator(Retracer())
    print("============ STEP 1 ============")
    ctl.add("step1", [], """\
#theory cp {
    sum_term { };
    &minimize/0 : sum_term, directive
}.
&minimize { x }.
""")
    ctl.ground([("step1", [])])
    n = 0
    for m in ctl.solve(yield_=True):
        n += 1
        print("--- Found model[{}] ---".format(m.thread_id))
    print("MODELS", n)
    print()
    print("============ STEP 2 ============")
    # Ground an empty increment and solve again with the same propagator.
    ctl.add("step2", [], "")
    ctl.ground([("step2", [])])
    n = 0
    for m in ctl.solve(yield_=True):
        n += 1
        print("--- Found model[{}] ---".format(m.thread_id))
    print("MODELS", n)
|
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class GridHead(BaseModule):
    """Grid head of `Grid R-CNN <https://arxiv.org/abs/1811.12030>`_.

    For each RoI this head predicts one heatmap per grid point. The
    per-point feature maps are fused along grid edges (first- and
    second-order fusion) before the final deconvolutions; the sub-region
    cropping follows Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).

    Args:
        grid_points (int): Number of grid points; must be a square number.
        num_convs (int): Number of conv layers in the feature branch.
        roi_feat_size (int): Spatial size of the (square) RoI feature map.
        in_channels (int): Number of input feature channels.
        conv_kernel_size (int): Kernel size of the conv layers.
        point_feat_channels (int): Feature channels reserved per grid point.
        deconv_kernel_size (int): Kernel size of the two deconv layers.
        class_agnostic (bool): Stored for interface compatibility; not
            referenced elsewhere in this head.
        loss_grid (dict): Config of the heatmap loss.
        conv_cfg (dict | None): Config for the conv layers.
        norm_cfg (dict): Config for normalization layers.
        init_cfg (list[dict]): Initialization config.
    """

    def __init__(self,
                 grid_points=9,
                 num_convs=8,
                 roi_feat_size=14,
                 in_channels=256,
                 conv_kernel_size=3,
                 point_feat_channels=64,
                 deconv_kernel_size=4,
                 class_agnostic=False,
                 loss_grid=dict(
                     type='CrossEntropyLoss', use_sigmoid=True,
                     loss_weight=15),
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=36),
                 init_cfg=[
                     dict(type='Kaiming', layer=['Conv2d', 'Linear']),
                     dict(
                         type='Normal',
                         layer='ConvTranspose2d',
                         std=0.001,
                         override=dict(
                             type='Normal',
                             name='deconv2',
                             std=0.001,
                             bias=-np.log(0.99 / 0.01)))
                 ]):
        super(GridHead, self).__init__(init_cfg)
        self.grid_points = grid_points
        self.num_convs = num_convs
        self.roi_feat_size = roi_feat_size
        self.in_channels = in_channels
        self.conv_kernel_size = conv_kernel_size
        self.point_feat_channels = point_feat_channels
        # Each grid point owns point_feat_channels channels of the shared
        # feature map (grouped convs/deconvs below keep them separate).
        self.conv_out_channels = self.point_feat_channels * self.grid_points
        self.class_agnostic = class_agnostic
        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
            assert self.conv_out_channels % norm_cfg['num_groups'] == 0

        assert self.grid_points >= 4
        self.grid_size = int(np.sqrt(self.grid_points))
        if self.grid_size * self.grid_size != self.grid_points:
            raise ValueError('grid_points must be a square number')

        # the predicted heatmap is half of whole_map_size
        if not isinstance(self.roi_feat_size, int):
            raise ValueError('Only square RoIs are supporeted in Grid R-CNN')
        self.whole_map_size = self.roi_feat_size * 4

        # compute point-wise sub-regions
        self.sub_regions = self.calc_sub_regions()

        self.convs = []
        for i in range(self.num_convs):
            in_channels = (
                self.in_channels if i == 0 else self.conv_out_channels)
            stride = 2 if i == 0 else 1
            padding = (self.conv_kernel_size - 1) // 2
            self.convs.append(
                ConvModule(
                    in_channels,
                    self.conv_out_channels,
                    self.conv_kernel_size,
                    stride=stride,
                    padding=padding,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg,
                    bias=True))
        self.convs = nn.Sequential(*self.convs)

        self.deconv1 = nn.ConvTranspose2d(
            self.conv_out_channels,
            self.conv_out_channels,
            kernel_size=deconv_kernel_size,
            stride=2,
            padding=(deconv_kernel_size - 2) // 2,
            groups=grid_points)
        self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
        self.deconv2 = nn.ConvTranspose2d(
            self.conv_out_channels,
            grid_points,
            kernel_size=deconv_kernel_size,
            stride=2,
            padding=(deconv_kernel_size - 2) // 2,
            groups=grid_points)

        # find the 4-neighbor of each grid point
        self.neighbor_points = []
        grid_size = self.grid_size
        for i in range(grid_size):  # i-th column
            for j in range(grid_size):  # j-th row
                neighbors = []
                if i > 0:  # left: (i - 1, j)
                    neighbors.append((i - 1) * grid_size + j)
                if j > 0:  # up: (i, j - 1)
                    neighbors.append(i * grid_size + j - 1)
                if j < grid_size - 1:  # down: (i, j + 1)
                    neighbors.append(i * grid_size + j + 1)
                if i < grid_size - 1:  # right: (i + 1, j)
                    neighbors.append((i + 1) * grid_size + j)
                self.neighbor_points.append(tuple(neighbors))
        # total edges in the grid
        self.num_edges = sum([len(p) for p in self.neighbor_points])

        self.forder_trans = nn.ModuleList()  # first-order feature transition
        self.sorder_trans = nn.ModuleList()  # second-order feature transition
        for neighbors in self.neighbor_points:
            fo_trans = nn.ModuleList()
            so_trans = nn.ModuleList()
            for _ in range(len(neighbors)):
                # each transition module consists of a 5x5 depth-wise conv and
                # 1x1 conv.
                fo_trans.append(
                    nn.Sequential(
                        nn.Conv2d(
                            self.point_feat_channels,
                            self.point_feat_channels,
                            5,
                            stride=1,
                            padding=2,
                            groups=self.point_feat_channels),
                        nn.Conv2d(self.point_feat_channels,
                                  self.point_feat_channels, 1)))
                so_trans.append(
                    nn.Sequential(
                        nn.Conv2d(
                            self.point_feat_channels,
                            self.point_feat_channels,
                            5,
                            1,
                            2,
                            groups=self.point_feat_channels),
                        nn.Conv2d(self.point_feat_channels,
                                  self.point_feat_channels, 1)))
            self.forder_trans.append(fo_trans)
            self.sorder_trans.append(so_trans)

        self.loss_grid = build_loss(loss_grid)

    def forward(self, x):
        """Predict grid heatmaps from RoI features.

        Args:
            x (Tensor): RoI features with square spatial size
                ``roi_feat_size`` (channel count is presumably
                ``in_channels`` to match the first conv — see __init__).

        Returns:
            dict: ``fused`` and ``unfused`` heatmaps with ``grid_points``
            channels each; outside training ``unfused`` is the same tensor
            as ``fused``.
        """
        assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
        # RoI feature transformation, downsample 2x
        x = self.convs(x)

        c = self.point_feat_channels
        # first-order fusion: each point aggregates transformed features of
        # its 4-neighbors on top of its own channel slice.
        x_fo = [None for _ in range(self.grid_points)]
        for i, points in enumerate(self.neighbor_points):
            x_fo[i] = x[:, i * c:(i + 1) * c]
            for j, point_idx in enumerate(points):
                x_fo[i] = x_fo[i] + self.forder_trans[i][j](
                    x[:, point_idx * c:(point_idx + 1) * c])

        # second-order fusion: aggregate the first-order features again.
        x_so = [None for _ in range(self.grid_points)]
        for i, points in enumerate(self.neighbor_points):
            x_so[i] = x[:, i * c:(i + 1) * c]
            for j, point_idx in enumerate(points):
                x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])

        # predicted heatmap with fused features
        x2 = torch.cat(x_so, dim=1)
        x2 = self.deconv1(x2)
        x2 = F.relu(self.norm1(x2), inplace=True)
        heatmap = self.deconv2(x2)

        # predicted heatmap with original features (applicable during training)
        if self.training:
            x1 = x
            x1 = self.deconv1(x1)
            x1 = F.relu(self.norm1(x1), inplace=True)
            heatmap_unfused = self.deconv2(x1)
        else:
            heatmap_unfused = heatmap

        return dict(fused=heatmap, unfused=heatmap_unfused)

    def calc_sub_regions(self):
        """Compute point specific representation regions.

        See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.

        Returns:
            list[tuple]: One ``(x1, y1, x2, y2)`` sub-region (in heatmap
            pixels) per grid point, each of size half_size x half_size.
        """
        # to make it consistent with the original implementation, half_size
        # is computed as 2 * quarter_size, which is smaller
        half_size = self.whole_map_size // 4 * 2
        sub_regions = []
        for i in range(self.grid_points):
            x_idx = i // self.grid_size
            y_idx = i % self.grid_size
            if x_idx == 0:
                sub_x1 = 0
            elif x_idx == self.grid_size - 1:
                sub_x1 = half_size
            else:
                ratio = x_idx / (self.grid_size - 1) - 0.25
                sub_x1 = max(int(ratio * self.whole_map_size), 0)
            if y_idx == 0:
                sub_y1 = 0
            elif y_idx == self.grid_size - 1:
                sub_y1 = half_size
            else:
                ratio = y_idx / (self.grid_size - 1) - 0.25
                sub_y1 = max(int(ratio * self.whole_map_size), 0)
            sub_regions.append(
                (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
        return sub_regions

    def get_targets(self, sampling_results, rcnn_train_cfg):
        """Build per-grid-point heatmap targets for positive RoIs.

        Positive proposals from all images are expanded to 2x their size;
        for every grid point of each GT box a small circle of radius
        ``rcnn_train_cfg.pos_radius`` is marked positive, then each point's
        map is cropped to its sub-region (Grid R-CNN Plus).

        Args:
            sampling_results (list): Sampling results with ``pos_bboxes``
                and ``pos_gt_bboxes`` tensors.
            rcnn_train_cfg: Training config providing ``pos_radius``.

        Returns:
            Tensor: Targets of shape
            (num_pos_rois, grid_points, half_size, half_size), on the same
            device as the sampled proposals.
        """
        # mix all samples (across images) together.
        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
                               dim=0).cpu()
        pos_gt_bboxes = torch.cat(
            [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
        assert pos_bboxes.shape == pos_gt_bboxes.shape

        # expand pos_bboxes to 2x of original size
        x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
        y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
        x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
        y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
        pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
        pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
        pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)

        num_rois = pos_bboxes.shape[0]
        map_size = self.whole_map_size
        # this is not the final target shape
        targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
                              dtype=torch.float)

        # pre-compute interpolation factors for all grid points.
        # the first item is the factor of x-dim, and the second is y-dim.
        # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
        factors = []
        for j in range(self.grid_points):
            x_idx = j // self.grid_size
            y_idx = j % self.grid_size
            factors.append((1 - x_idx / (self.grid_size - 1),
                            1 - y_idx / (self.grid_size - 1)))

        radius = rcnn_train_cfg.pos_radius
        radius2 = radius**2
        for i in range(num_rois):
            # ignore small bboxes
            if (pos_bbox_ws[i] <= self.grid_size
                    or pos_bbox_hs[i] <= self.grid_size):
                continue
            # for each grid point, mark a small circle as positive
            for j in range(self.grid_points):
                factor_x, factor_y = factors[j]
                gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
                    1 - factor_x) * pos_gt_bboxes[i, 2]
                gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
                    1 - factor_y) * pos_gt_bboxes[i, 3]

                cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
                         map_size)
                cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
                         map_size)

                for x in range(cx - radius, cx + radius + 1):
                    for y in range(cy - radius, cy + radius + 1):
                        if x >= 0 and x < map_size and y >= 0 and y < map_size:
                            if (x - cx)**2 + (y - cy)**2 <= radius2:
                                targets[i, j, y, x] = 1

        # reduce the target heatmap size by a half
        # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
        sub_targets = []
        for i in range(self.grid_points):
            sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
            sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
        sub_targets = torch.cat(sub_targets, dim=1)
        sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
        return sub_targets

    def loss(self, grid_pred, grid_targets):
        """Compute the grid loss on both fused and unfused heatmaps.

        Args:
            grid_pred (dict): ``fused``/``unfused`` heatmaps from forward().
            grid_targets (Tensor): Targets produced by get_targets().

        Returns:
            dict: Single-entry dict with the summed ``loss_grid``.
        """
        loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
        loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
        loss_grid = loss_fused + loss_unfused
        return dict(loss_grid=loss_grid)

    def get_bboxes(self, det_bboxes, grid_pred, img_metas):
        """Refine detected boxes by voting over grid-point heatmap peaks.

        Args:
            det_bboxes (Tensor): Detections of shape (R, 5) — 4 box
                coordinates plus a score column (see the slicing below).
            grid_pred (Tensor): Predicted heatmap logits of shape
                (R, grid_points, half_size, half_size).
            img_metas (list[dict]): Image metas; ``img_shape`` of the first
                image is used for clipping.

        Returns:
            Tensor: Refined boxes with scores, shape (R, 5).
        """
        # TODO: refactoring
        assert det_bboxes.shape[0] == grid_pred.shape[0]
        det_bboxes = det_bboxes.cpu()
        cls_scores = det_bboxes[:, [4]]
        det_bboxes = det_bboxes[:, :4]
        grid_pred = grid_pred.sigmoid().cpu()

        R, c, h, w = grid_pred.shape
        half_size = self.whole_map_size // 4 * 2
        assert h == w == half_size
        assert c == self.grid_points

        # find the point with max scores in the half-sized heatmap
        grid_pred = grid_pred.view(R * c, h * w)
        pred_scores, pred_position = grid_pred.max(dim=1)
        xs = pred_position % w
        ys = pred_position // w

        # get the position in the whole heatmap instead of half-sized heatmap
        for i in range(self.grid_points):
            xs[i::self.grid_points] += self.sub_regions[i][0]
            ys[i::self.grid_points] += self.sub_regions[i][1]

        # reshape to (num_rois, grid_points)
        pred_scores, xs, ys = tuple(
            map(lambda x: x.view(R, c), [pred_scores, xs, ys]))

        # get expanded pos_bboxes
        widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
        heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
        x1 = (det_bboxes[:, 0, None] - widths / 2)
        y1 = (det_bboxes[:, 1, None] - heights / 2)
        # map the grid point to the absolute coordinates
        abs_xs = (xs.float() + 0.5) / w * widths + x1
        abs_ys = (ys.float() + 0.5) / h * heights + y1

        # get the grid points indices that fall on the bbox boundaries
        x1_inds = [i for i in range(self.grid_size)]
        y1_inds = [i * self.grid_size for i in range(self.grid_size)]
        x2_inds = [
            self.grid_points - self.grid_size + i
            for i in range(self.grid_size)
        ]
        y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]

        # voting of all grid points on some boundary
        bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
        bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
        bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
        bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
            dim=1, keepdim=True) / (
                pred_scores[:, y2_inds].sum(dim=1, keepdim=True))

        bbox_res = torch.cat(
            [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
        bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])
        bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])

        return bbox_res
|
|
# Copyright (c) 2017 Dell Inc. or its subsidiaries.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import datetime
from defusedxml import minidom
import hashlib
import random
import re
from cinder.objects.group import Group
from oslo_log import log as logging
from oslo_utils import strutils
import six
from cinder import exception
from cinder.i18n import _
from cinder.objects import fields
from cinder.volume import volume_types
LOG = logging.getLogger(__name__)

# SHARED CONSTANTS
ISCSI = 'iscsi'
FC = 'fc'
INTERVAL = 'interval'
RETRIES = 'retries'
VOLUME_ELEMENT_NAME_PREFIX = 'OS-'
MAX_SRP_LENGTH = 16
TRUNCATE_5 = 5
TRUNCATE_27 = 27
ARRAY = 'array'
SLO = 'slo'
WORKLOAD = 'workload'
SRP = 'srp'
PORTGROUPNAME = 'storagetype:portgroupname'
DEVICE_ID = 'device_id'
INITIATOR_CHECK = 'initiator_check'
SG_NAME = 'storagegroup_name'
MV_NAME = 'maskingview_name'
IG_NAME = 'init_group_name'
PARENT_SG_NAME = 'parent_sg_name'
CONNECTOR = 'connector'
VOL_NAME = 'volume_name'
EXTRA_SPECS = 'extra_specs'
IS_RE = 'replication_enabled'
DISABLECOMPRESSION = 'storagetype:disablecompression'
# Replication modes
REP_SYNC = 'Synchronous'
REP_ASYNC = 'Asynchronous'
REP_METRO = 'Metro'
REP_MODE = 'rep_mode'
# RDF replication pair states
RDF_SYNC_STATE = 'synchronized'
RDF_SYNCINPROG_STATE = 'syncinprog'
RDF_CONSISTENT_STATE = 'consistent'
RDF_SUSPENDED_STATE = 'suspended'
RDF_FAILEDOVER_STATE = 'failed over'
RDF_ACTIVE = 'active'
RDF_ACTIVEACTIVE = 'activeactive'
RDF_ACTIVEBIAS = 'activebias'
METROBIAS = 'metro_bias'
# Cinder.conf vmax configuration
VMAX_SERVER_IP = 'san_ip'
VMAX_USER_NAME = 'san_login'
VMAX_PASSWORD = 'san_password'
VMAX_SERVER_PORT = 'san_rest_port'
VMAX_ARRAY = 'vmax_array'
VMAX_WORKLOAD = 'vmax_workload'
VMAX_SRP = 'vmax_srp'
VMAX_SERVICE_LEVEL = 'vmax_service_level'
VMAX_PORT_GROUPS = 'vmax_port_groups'
class VMAXUtils(object):
"""Utility class for Rest based VMAX volume drivers.
This Utility class is for VMAX volume drivers based on Unisphere Rest API.
"""
    def __init__(self):
        """Utility class for Rest based VMAX volume drivers."""
        # Intentionally empty: this class keeps no instance state.
def get_host_short_name(self, host_name):
"""Returns the short name for a given qualified host name.
Checks the host name to see if it is the fully qualified host name
and returns part before the dot. If there is no dot in the host name
the full host name is returned.
:param host_name: the fully qualified host name
:returns: string -- the short host_name
"""
host_array = host_name.split('.')
if len(host_array) > 1:
short_host_name = host_array[0]
else:
short_host_name = host_name
return self.generate_unique_trunc_host(short_host_name)
@staticmethod
def get_volumetype_extra_specs(volume, volume_type_id=None):
"""Gets the extra specs associated with a volume type.
:param volume: the volume dictionary
:param volume_type_id: Optional override for volume.volume_type_id
:returns: dict -- extra_specs - the extra specs
:raises: VolumeBackendAPIException
"""
extra_specs = {}
try:
if volume_type_id:
type_id = volume_type_id
else:
type_id = volume.volume_type_id
if type_id is not None:
extra_specs = volume_types.get_volume_type_extra_specs(type_id)
except Exception as e:
LOG.debug('Exception getting volume type extra specs: %(e)s',
{'e': six.text_type(e)})
return extra_specs
@staticmethod
def get_short_protocol_type(protocol):
"""Given the protocol type, return I for iscsi and F for fc.
:param protocol: iscsi or fc
:returns: string -- 'I' for iscsi or 'F' for fc
"""
if protocol.lower() == ISCSI.lower():
return 'I'
elif protocol.lower() == FC.lower():
return 'F'
else:
return protocol
@staticmethod
def truncate_string(str_to_truncate, max_num):
"""Truncate a string by taking first and last characters.
:param str_to_truncate: the string to be truncated
:param max_num: the maximum number of characters
:returns: string -- truncated string or original string
"""
if len(str_to_truncate) > max_num:
new_num = len(str_to_truncate) - max_num // 2
first_chars = str_to_truncate[:max_num // 2]
last_chars = str_to_truncate[new_num:]
str_to_truncate = first_chars + last_chars
return str_to_truncate
@staticmethod
def get_time_delta(start_time, end_time):
"""Get the delta between start and end time.
:param start_time: the start time
:param end_time: the end time
:returns: string -- delta in string H:MM:SS
"""
delta = end_time - start_time
return six.text_type(datetime.timedelta(seconds=int(delta)))
def get_default_storage_group_name(
self, srp_name, slo, workload, is_compression_disabled=False,
is_re=False, rep_mode=None):
"""Determine default storage group from extra_specs.
:param srp_name: the name of the srp on the array
:param slo: the service level string e.g Bronze
:param workload: the workload string e.g DSS
:param is_compression_disabled: flag for disabling compression
:param is_re: flag for replication
:param rep_mode: flag to indicate replication mode
:returns: storage_group_name
"""
if slo and workload:
prefix = ("OS-%(srpName)s-%(slo)s-%(workload)s"
% {'srpName': srp_name, 'slo': slo,
'workload': workload})
if is_compression_disabled:
prefix += "-CD"
else:
prefix = "OS-no_SLO"
if is_re:
prefix += self.get_replication_prefix(rep_mode)
storage_group_name = ("%(prefix)s-SG" % {'prefix': prefix})
return storage_group_name
@staticmethod
def get_volume_element_name(volume_id):
"""Get volume element name follows naming convention, i.e. 'OS-UUID'.
:param volume_id: Openstack volume ID containing uuid
:returns: volume element name in format of OS-UUID
"""
element_name = volume_id
uuid_regex = (re.compile(
'[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}',
re.I))
match = uuid_regex.search(volume_id)
if match:
volume_uuid = match.group()
element_name = ("%(prefix)s%(volumeUUID)s"
% {'prefix': VOLUME_ELEMENT_NAME_PREFIX,
'volumeUUID': volume_uuid})
LOG.debug(
"get_volume_element_name elementName: %(elementName)s.",
{'elementName': element_name})
return element_name
@staticmethod
def modify_snapshot_prefix(snapshot_name, manage=False, unmanage=False):
"""Modify a Snapshot prefix on VMAX backend.
Prepare a snapshot name for manage/unmanage snapshot process either
by adding or removing 'OS-' prefix.
:param snapshot_name: the old snapshot backend display name
:param manage: (bool) if the operation is managing a snapshot
:param unmanage: (bool) if the operation is unmanaging a snapshot
:return: snapshot name ready for backend VMAX assignment
"""
new_snap_name = None
if manage:
new_snap_name = ("%(prefix)s%(snapshot_name)s"
% {'prefix': 'OS-',
'snapshot_name': snapshot_name})
if unmanage:
snap_split = snapshot_name.split("-", 1)
if snap_split[0] == 'OS':
new_snap_name = snap_split[1]
return new_snap_name
def generate_unique_trunc_host(self, host_name):
"""Create a unique short host name under 16 characters.
:param host_name: long host name
:returns: truncated host name
"""
if host_name and len(host_name) > 16:
host_name = host_name.lower()
m = hashlib.md5()
m.update(host_name.encode('utf-8'))
uuid = m.hexdigest()
new_name = ("%(host)s%(uuid)s"
% {'host': host_name[-6:],
'uuid': uuid})
host_name = self.truncate_string(new_name, 16)
return host_name
def get_pg_short_name(self, portgroup_name):
"""Create a unique port group name under 12 characters.
:param portgroup_name: long portgroup_name
:returns: truncated portgroup_name
"""
if portgroup_name and len(portgroup_name) > 12:
portgroup_name = portgroup_name.lower()
m = hashlib.md5()
m.update(portgroup_name.encode('utf-8'))
uuid = m.hexdigest()
new_name = ("%(pg)s%(uuid)s"
% {'pg': portgroup_name[-6:],
'uuid': uuid})
portgroup_name = self.truncate_string(new_name, 12)
return portgroup_name
@staticmethod
def get_default_oversubscription_ratio(max_over_sub_ratio):
"""Override ratio if necessary.
The over subscription ratio will be overridden if the user supplied
max oversubscription ratio is less than 1.
:param max_over_sub_ratio: user supplied over subscription ratio
:returns: max_over_sub_ratio
"""
if max_over_sub_ratio < 1.0:
LOG.info("The user supplied value for max_over_subscription "
"ratio is less than 1.0. Using the default value of "
"20.0 instead...")
max_over_sub_ratio = 20.0
return max_over_sub_ratio
@staticmethod
def _process_tag(element, tag_name):
"""Process the tag to get the value.
:param element: the parent element
:param tag_name: the tag name
:returns: nodeValue(can be None)
"""
node_value = None
try:
processed_element = element.getElementsByTagName(tag_name)[0]
node_value = processed_element.childNodes[0].nodeValue
if node_value:
node_value = node_value.strip()
except IndexError:
pass
return node_value
def _get_connection_info(self, rest_element):
"""Given the filename get the rest server connection details.
:param rest_element: the rest element
:returns: dict -- connargs - the connection info dictionary
:raises: VolumeBackendAPIException
"""
connargs = {
'RestServerIp': (
self._process_tag(rest_element, 'RestServerIp')),
'RestServerPort': (
self._process_tag(rest_element, 'RestServerPort')),
'RestUserName': (
self._process_tag(rest_element, 'RestUserName')),
'RestPassword': (
self._process_tag(rest_element, 'RestPassword'))}
for k, __ in connargs.items():
if connargs[k] is None:
exception_message = (_(
"RestServerIp, RestServerPort, RestUserName, "
"RestPassword must have valid values."))
LOG.error(exception_message)
raise exception.VolumeBackendAPIException(
data=exception_message)
# These can be None
connargs['SSLCert'] = self._process_tag(rest_element, 'SSLCert')
connargs['SSLVerify'] = (
self._process_tag(rest_element, 'SSLVerify'))
return connargs
def parse_file_to_get_array_map(self, file_name):
"""Parses a file and gets array map.
Given a file, parse it to get array and pool(srp).
.. code:: ini
<EMC>
<RestServerIp>10.108.246.202</RestServerIp>
<RestServerPort>8443</RestServerPort>
<RestUserName>smc</RestUserName>
<RestPassword>smc</RestPassword>
<SSLCert>/path/client.cert</SSLCert>
<SSLVerify>/path/to/certfile.pem</SSLVerify>
<PortGroups>
<PortGroup>OS-PORTGROUP1-PG</PortGroup>
</PortGroups>
<Array>000198700439</Array>
<SRP>SRP_1</SRP>
</EMC>
:param file_name: the configuration file
:returns: list
"""
LOG.warning("Use of xml file in backend configuration is deprecated "
"in Queens and will not be supported in future releases.")
kwargs = {}
my_file = open(file_name, 'r')
data = my_file.read()
my_file.close()
dom = minidom.parseString(data)
try:
connargs = self._get_connection_info(dom)
portgroup = self._get_random_portgroup(dom)
serialnumber = self._process_tag(dom, 'Array')
if serialnumber is None:
LOG.error("Array Serial Number must be in the file %(file)s.",
{'file': file_name})
srp_name = self._process_tag(dom, 'SRP')
if srp_name is None:
LOG.error("SRP Name must be in the file %(file)s.",
{'file': file_name})
slo = self._process_tag(dom, 'ServiceLevel')
workload = self._process_tag(dom, 'Workload')
kwargs = (
{'RestServerIp': connargs['RestServerIp'],
'RestServerPort': connargs['RestServerPort'],
'RestUserName': connargs['RestUserName'],
'RestPassword': connargs['RestPassword'],
'SSLCert': connargs['SSLCert'],
'SSLVerify': connargs['SSLVerify'],
'SerialNumber': serialnumber,
'srpName': srp_name,
'PortGroup': portgroup})
if slo is not None:
kwargs.update({'ServiceLevel': slo, 'Workload': workload})
except IndexError:
pass
return kwargs
@staticmethod
def _get_random_portgroup(element):
"""Randomly choose a portgroup from list of portgroups.
:param element: the parent element
:returns: the randomly chosen port group
"""
portgroupelements = element.getElementsByTagName('PortGroup')
if portgroupelements and len(portgroupelements) > 0:
portgroupnames = [portgroupelement.childNodes[0].nodeValue.strip()
for portgroupelement in portgroupelements
if portgroupelement.childNodes]
portgroupnames = list(set(filter(None, portgroupnames)))
pg_len = len(portgroupnames)
if pg_len > 0:
return portgroupnames[random.randint(0, pg_len - 1)]
return None
def get_temp_snap_name(self, clone_name, source_device_id):
"""Construct a temporary snapshot name for clone operation.
:param clone_name: the name of the clone
:param source_device_id: the source device id
:returns: snap_name
"""
trunc_clone = self.truncate_string(clone_name, 10)
snap_name = ("temp-%(device)s-%(clone)s"
% {'device': source_device_id, 'clone': trunc_clone})
return snap_name
@staticmethod
def get_array_and_device_id(volume, external_ref):
"""Helper function for manage volume to get array name and device ID.
:param volume: volume object from API
:param external_ref: the existing volume object to be manged
:returns: string value of the array name and device ID
"""
device_id = external_ref.get(u'source-name', None)
LOG.debug("External_ref: %(er)s", {'er': external_ref})
if not device_id:
device_id = external_ref.get(u'source-id', None)
host = volume.host
host_list = host.split('+')
array = host_list[(len(host_list) - 1)]
if device_id:
LOG.debug("Get device ID of existing volume - device ID: "
"%(device_id)s, Array: %(array)s.",
{'device_id': device_id,
'array': array})
else:
exception_message = (_("Source volume device ID is required."))
raise exception.VolumeBackendAPIException(
data=exception_message)
return array, device_id
@staticmethod
def is_compression_disabled(extra_specs):
"""Check is compression is to be disabled.
:param extra_specs: extra specifications
:returns: boolean
"""
do_disable_compression = False
if DISABLECOMPRESSION in extra_specs:
if strutils.bool_from_string(extra_specs[DISABLECOMPRESSION]):
do_disable_compression = True
return do_disable_compression
def change_compression_type(self, is_source_compr_disabled, new_type):
"""Check if volume type have different compression types
:param is_source_compr_disabled: from source
:param new_type: from target
:returns: boolean
"""
extra_specs = new_type['extra_specs']
is_target_compr_disabled = self.is_compression_disabled(extra_specs)
if is_target_compr_disabled == is_source_compr_disabled:
return False
else:
return True
def change_replication(self, vol_is_replicated, new_type):
"""Check if volume types have different replication status.
:param vol_is_replicated: from source
:param new_type: from target
:return: bool
"""
is_tgt_rep = self.is_replication_enabled(new_type['extra_specs'])
return vol_is_replicated != is_tgt_rep
@staticmethod
def is_replication_enabled(extra_specs):
"""Check if replication is to be enabled.
:param extra_specs: extra specifications
:returns: bool - true if enabled, else false
"""
replication_enabled = False
if IS_RE in extra_specs:
replication_enabled = True
return replication_enabled
@staticmethod
def get_replication_config(rep_device_list):
"""Gather necessary replication configuration info.
:param rep_device_list: the replication device list from cinder.conf
:returns: rep_config, replication configuration dict
"""
rep_config = {}
if not rep_device_list:
return None
else:
target = rep_device_list[0]
try:
rep_config['array'] = target['target_device_id']
rep_config['srp'] = target['remote_pool']
rep_config['rdf_group_label'] = target['rdf_group_label']
rep_config['portgroup'] = target['remote_port_group']
except KeyError as ke:
error_message = (_("Failed to retrieve all necessary SRDF "
"information. Error received: %(ke)s.") %
{'ke': six.text_type(ke)})
LOG.exception(error_message)
raise exception.VolumeBackendAPIException(data=error_message)
allow_extend = target.get('allow_extend', 'false')
if strutils.bool_from_string(allow_extend):
rep_config['allow_extend'] = True
else:
rep_config['allow_extend'] = False
rep_mode = target.get('mode', '')
if rep_mode.lower() in ['async', 'asynchronous']:
rep_config['mode'] = REP_ASYNC
elif rep_mode.lower() == 'metro':
rep_config['mode'] = REP_METRO
metro_bias = target.get('metro_use_bias', 'false')
if strutils.bool_from_string(metro_bias):
rep_config[METROBIAS] = True
else:
rep_config[METROBIAS] = False
allow_delete_metro = target.get('allow_delete_metro', 'false')
if strutils.bool_from_string(allow_delete_metro):
rep_config['allow_delete_metro'] = True
else:
rep_config['allow_delete_metro'] = False
else:
rep_config['mode'] = REP_SYNC
return rep_config
@staticmethod
def is_volume_failed_over(volume):
"""Check if a volume has been failed over.
:param volume: the volume object
:returns: bool
"""
if volume is not None:
if volume.get('replication_status') and (
volume.replication_status ==
fields.ReplicationStatus.FAILED_OVER):
return True
return False
@staticmethod
def update_volume_model_updates(volume_model_updates,
volumes, group_id, status='available'):
"""Update the volume model's status and return it.
:param volume_model_updates: list of volume model update dicts
:param volumes: volumes object api
:param group_id: consistency group id
:param status: string value reflects the status of the member volume
:returns: volume_model_updates - updated volumes
"""
LOG.info("Updating status for group: %(id)s.", {'id': group_id})
if volumes:
for volume in volumes:
volume_model_updates.append({'id': volume.id,
'status': status})
else:
LOG.info("No volume found for group: %(cg)s.", {'cg': group_id})
return volume_model_updates
@staticmethod
def get_grp_volume_model_update(volume, volume_dict, group_id):
"""Create and return the volume model update on creation.
:param volume: volume object
:param volume_dict: the volume dict
:param group_id: consistency group id
:returns: model_update
"""
LOG.info("Updating status for group: %(id)s.", {'id': group_id})
model_update = ({'id': volume.id, 'status': 'available',
'provider_location': six.text_type(volume_dict)})
return model_update
@staticmethod
def update_extra_specs(extraspecs):
"""Update extra specs.
:param extraspecs: the additional info
:returns: extraspecs
"""
try:
pool_details = extraspecs['pool_name'].split('+')
extraspecs[SLO] = pool_details[0]
if len(pool_details) == 4:
extraspecs[WORKLOAD] = pool_details[1]
extraspecs[SRP] = pool_details[2]
extraspecs[ARRAY] = pool_details[3]
else:
# Assume no workload given in pool name
extraspecs[SRP] = pool_details[1]
extraspecs[ARRAY] = pool_details[2]
extraspecs[WORKLOAD] = 'NONE'
except KeyError:
LOG.error("Error parsing SLO, workload from"
" the provided extra_specs.")
return extraspecs
def get_volume_group_utils(self, group, interval, retries):
"""Standard utility for generic volume groups.
:param group: the generic volume group object to be created
:param interval: Interval in seconds between retries
:param retries: Retry count
:returns: array, intervals_retries_dict
:raises: VolumeBackendAPIException
"""
arrays = set()
# Check if it is a generic volume group instance
if isinstance(group, Group):
for volume_type in group.volume_types:
extra_specs = self.update_extra_specs(volume_type.extra_specs)
arrays.add(extra_specs[ARRAY])
else:
msg = (_("Unable to get volume type ids."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if len(arrays) != 1:
if not arrays:
msg = (_("Failed to get an array associated with "
"volume group: %(groupid)s.")
% {'groupid': group.id})
else:
msg = (_("There are multiple arrays "
"associated with volume group: %(groupid)s.")
% {'groupid': group.id})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
array = arrays.pop()
intervals_retries_dict = {INTERVAL: interval, RETRIES: retries}
return array, intervals_retries_dict
def update_volume_group_name(self, group):
"""Format id and name consistency group.
:param group: the generic volume group object
:returns: group_name -- formatted name + id
"""
group_name = ""
if group.name is not None and group.name != group.id:
group_name = (
self.truncate_string(
group.name, TRUNCATE_27) + "_")
group_name += group.id
return group_name
@staticmethod
def add_legacy_pools(pools):
"""Add legacy pools to allow extending a volume after upgrade.
:param pools: the pool list
:return: pools - the updated pool list
"""
extra_pools = []
for pool in pools:
if 'none' in pool['pool_name'].lower():
extra_pools.append(pool)
for pool in extra_pools:
slo = pool['pool_name'].split('+')[0]
srp = pool['pool_name'].split('+')[2]
array = pool['pool_name'].split('+')[3]
new_pool_name = ('%(slo)s+%(srp)s+%(array)s'
% {'slo': slo, 'srp': srp, 'array': array})
new_pool = deepcopy(pool)
new_pool['pool_name'] = new_pool_name
pools.append(new_pool)
return pools
def check_replication_matched(self, volume, extra_specs):
"""Check volume type and group type.
This will make sure they do not conflict with each other.
:param volume: volume to be checked
:param extra_specs: the extra specifications
:raises: InvalidInput
"""
# If volume is not a member of group, skip this check anyway.
if not volume.group:
return
vol_is_re = self.is_replication_enabled(extra_specs)
group_is_re = volume.group.is_replicated
if not (vol_is_re == group_is_re):
msg = _('Replication should be enabled or disabled for both '
'volume or group. Volume replication status: '
'%(vol_status)s, group replication status: '
'%(group_status)s') % {
'vol_status': vol_is_re, 'group_status': group_is_re}
raise exception.InvalidInput(reason=msg)
@staticmethod
def check_rep_status_enabled(group):
"""Check replication status for group.
Group status must be enabled before proceeding with certain
operations.
:param group: the group object
:raises: InvalidInput
"""
if group.is_replicated:
if group.replication_status != fields.ReplicationStatus.ENABLED:
msg = (_('Replication status should be %s for '
'replication-enabled group.')
% fields.ReplicationStatus.ENABLED)
LOG.error(msg)
raise exception.InvalidInput(reason=msg)
else:
LOG.debug('Replication is not enabled on group %s, '
'skip status check.', group.id)
@staticmethod
def get_replication_prefix(rep_mode):
"""Get the replication prefix.
Replication prefix for storage group naming is based on whether it is
synchronous, asynchronous, or metro replication mode.
:param rep_mode: flag to indicate if replication is async
:return: prefix
"""
if rep_mode == REP_ASYNC:
prefix = "-RA"
elif rep_mode == REP_METRO:
prefix = "-RM"
else:
prefix = "-RE"
return prefix
@staticmethod
def get_async_rdf_managed_grp_name(rep_config):
"""Get the name of the group used for async replication management.
:param rep_config: the replication configuration
:return: group name
"""
async_grp_name = ("OS-%(rdf)s-%(mode)s-rdf-sg"
% {'rdf': rep_config['rdf_group_label'],
'mode': rep_config['mode']})
LOG.debug("The async/ metro rdf managed group name is %(name)s",
{'name': async_grp_name})
return async_grp_name
def is_metro_device(self, rep_config, extra_specs):
"""Determine if a volume is a Metro enabled device.
:param rep_config: the replication configuration
:param extra_specs: the extra specifications
:return: bool
"""
is_metro = (True if self.is_replication_enabled(extra_specs)
and rep_config is not None
and rep_config['mode'] == REP_METRO else False)
return is_metro
def does_vol_need_rdf_management_group(self, extra_specs):
"""Determine if a volume is a Metro or Async.
:param extra_specs: the extra specifications
:return: bool
"""
if (self.is_replication_enabled(extra_specs) and
extra_specs.get(REP_MODE, None) in
[REP_ASYNC, REP_METRO]):
return True
return False
@staticmethod
def get_temp_failover_grp_name(rep_config):
"""Get the temporary group name used for failover.
:param rep_config: the replication config
:return: temp_grp_name
"""
temp_grp_name = ("OS-%(rdf)s-temp-rdf-sg"
% {'rdf': rep_config['rdf_group_label']})
LOG.debug("The temp rdf managed group name is %(name)s",
{'name': temp_grp_name})
return temp_grp_name
|
|
'''
Helper to track integer operations.
>>> track(5) * 2 - 3
7
>>> (track(5) * 2 - 3).operations
sub(lhs=mul(lhs=5, rhs=2), rhs=3)
>>> expression((track(5) * 2 - 3).operations)
'((5 * 2) - 3)'
'''
import operator
import unittest
import sys
from collections import namedtuple
from decimal import Decimal
from functools import partial as _partial
# Operation records: one namedtuple per supported operator.  The *class
# name* of each record doubles as the key into ``op_format`` below, which
# is why ``abs`` deliberately shadows the builtin of the same name (so
# that ``operator.abs`` on tracked values records an ``abs(...)`` node).
# NOTE: single-field names were previously written as ('lhs') — a plain
# string, which namedtuple happens to accept — now explicit 1-tuples.
abs = namedtuple('abs', ('lhs',))
neg = namedtuple('neg', ('lhs',))
add = namedtuple('add', ('lhs', 'rhs'))
div = namedtuple('div', ('lhs', 'rhs'))
mod = namedtuple('mod', ('lhs', 'rhs'))
mul = namedtuple('mul', ('lhs', 'rhs'))
sub = namedtuple('sub', ('lhs', 'rhs'))
# printf-style templates used by ``expression`` to render each node.
op_format = {
    'abs': 'abs({})',
    'neg': '-{}',
    'add': '{} + {}',
    'div': '{} / {}',
    'mod': '{} % {}',
    'mul': '{} * {}',
    'sub': '{} - {}',
}
def track(number, operations=None):
    '''Wrap *number* in the tracking subclass matching its type.'''
    wrapper = DecimalTrack if isinstance(number, Decimal) else IntTrack
    return wrapper(number, operations)
def partial(function, *args, **kwargs):
    ''' add __get__ to python's partial implementation '''
    bound = _partial(function, *args, **kwargs)

    # A plain function has __get__, so when the result is used as a class
    # attribute the instance is bound as the first positional argument;
    # called directly it behaves exactly like functools.partial.
    def bind(*call_args, **call_kwargs):
        return bound(*call_args, **call_kwargs)
    return bind
def unary_op(type_, operator, operation, lhs):
    '''Apply a unary *operator* to *lhs* and record it as *operation*.'''
    history = getattr(lhs, 'operations', None)
    if history is None:
        # No recorded history: the raw value is the leaf node.
        history = lhs
    return track(operator(type_(lhs)), operation(history))
def binary_op(type_, operator, operation, lhs, rhs):
    '''Apply a binary *operator* and record it as an *operation* node.'''
    def history(value):
        # Prefer an existing operation record; fall back to the raw value.
        ops = getattr(value, 'operations', None)
        return value if ops is None else ops

    result = operator(type_(lhs), type_(rhs))
    return track(result, operation(history(lhs), history(rhs)))
def binary_rop(type_, operator, operation, rhs, lhs):
    '''Reflected variant of ``binary_op``: the operands arrive swapped.'''
    return binary_op(type_, operator, operation, lhs, rhs)
def expression(operations, callback=None):
    '''Create a readable string from operations, use callback to change the numeric values'''
    if not isinstance(operations, tuple):
        # Leaf value: render it, optionally transformed by *callback*.
        value = callback(operations) if callback else operations
        return str(value)
    template = op_format[operations.__class__.__name__]
    rendered = (expression(child, callback) for child in operations)
    return '({})'.format(template.format(*rendered))
class IntTrack(int):
    """int subclass that records every arithmetic operation applied to it.

    Each arithmetic dunder delegates to ``unary_op``/``binary_op`` through
    the custom ``partial`` above (whose result supports ``__get__``, so the
    assignments below behave like ordinary methods).  Results are new
    ``IntTrack`` instances whose ``operations`` attribute holds a namedtuple
    expression tree (or None for an untracked root value).
    """
    def __new__(cls, value, operations=None):
        obj = super(IntTrack, cls).__new__(cls, value)
        obj.operations = operations  # namedtuple tree, or None for a root
        return obj
    __abs__ = partial(unary_op, int, operator.abs, abs)
    __neg__ = partial(unary_op, int, operator.neg, neg)
    __add__ = partial(binary_op, int, operator.add, add)
    __mod__ = partial(binary_op, int, operator.mod, mod)
    __mul__ = partial(binary_op, int, operator.mul, mul)
    __sub__ = partial(binary_op, int, operator.sub, sub)
    __truediv__ = partial(binary_op, int, operator.truediv, div)
    # Reflected variants use binary_rop so the operand order is restored.
    __radd__ = partial(binary_rop, int, operator.add, add)
    __rmod__ = partial(binary_rop, int, operator.mod, mod)
    __rmul__ = partial(binary_rop, int, operator.mul, mul)
    __rsub__ = partial(binary_rop, int, operator.sub, sub)
    __rtruediv__ = partial(binary_rop, int, operator.truediv, div)
    # Python 2 only: classic division (operator.div does not exist on 3.x).
    if sys.version_info[0] == 2:
        __div__ = partial(binary_op, int, operator.div, div)
        __rdiv__ = partial(binary_rop, int, operator.div, div)
class DecimalTrack(Decimal):
    """Decimal counterpart of IntTrack; records operations applied to it."""
    # Decimal has __init__ on 2.7 but does not on 3.4, so the operations
    # attribute is attached in __new__.
    def __new__(cls, value, operations=None):
        obj = super(DecimalTrack, cls).__new__(cls, value)
        obj.operations = operations
        return obj
    __abs__ = partial(unary_op, Decimal, operator.abs, abs)
    __neg__ = partial(unary_op, Decimal, operator.neg, neg)
    __add__ = partial(binary_op, Decimal, operator.add, add)
    __mod__ = partial(binary_op, Decimal, operator.mod, mod)
    __mul__ = partial(binary_op, Decimal, operator.mul, mul)
    __sub__ = partial(binary_op, Decimal, operator.sub, sub)
    __truediv__ = partial(binary_op, Decimal, operator.truediv, div)
    __radd__ = partial(binary_rop, Decimal, operator.add, add)
    __rmod__ = partial(binary_rop, Decimal, operator.mod, mod)
    __rmul__ = partial(binary_rop, Decimal, operator.mul, mul)
    __rsub__ = partial(binary_rop, Decimal, operator.sub, sub)
    # BUG FIX: this previously used binary_op, so reflected division
    # (e.g. ``3 / track(Decimal(7))``) computed self/other instead of
    # other/self.  binary_rop swaps the operands back, matching IntTrack.
    __rtruediv__ = partial(binary_rop, Decimal, operator.truediv, div)
    if sys.version_info[0] == 2:
        __div__ = partial(binary_op, Decimal, operator.div, div)
        __rdiv__ = partial(binary_rop, Decimal, operator.div, div)
class TrackTestCase(unittest.TestCase):
    """Exercise the tracking wrappers, the custom partial and expression().

    NOTE: the deprecated ``assertEquals`` alias (removed in Python 3.12)
    has been replaced with ``assertEqual`` throughout.
    """
    def test_unaryop(self):
        self.assertEqual(-track(5), -5)
        self.assertEqual((-track(5)).operations, neg(5))
        self.assertEqual(operator.abs(track(-5)).operations, abs(-5))
    def test_binaryop(self):
        result = binary_op(int, operator.mul, mul, 5, 3)
        self.assertEqual(result, 15)
        self.assertEqual(result.operations, mul(5, 3))
        result = binary_op(int, operator.add, add, result, result)
        self.assertEqual(result.operations, add(mul(5, 3), mul(5, 3)))
    def test_partial(self):
        opmul = partial(binary_op, int, operator.mul, mul)
        self.assertEqual(opmul(5, 3), binary_op(int, operator.mul, mul, 5, 3))
        self.assertEqual(opmul.__get__(5)(3), binary_op(int, operator.mul, mul, 5, 3))
    def test_decimal(self):
        self.assertEqual(-track(Decimal(5)), -5)
        self.assertEqual((-track(Decimal(5))).operations, neg(5))
        self.assertEqual(operator.abs(track(Decimal(-5))).operations, abs(-5))
        result = binary_op(Decimal, operator.mul, mul, 5, 3)
        self.assertEqual(result, 15)
        self.assertEqual(result.operations, mul(5, 3))
        result = binary_op(Decimal, operator.add, add, result, result)
        self.assertEqual(result.operations, add(mul(5, 3), mul(5, 3)))
    def test_partial_binding(self):
        def binding(value, self):
            return value, self
        self.assertEqual(partial(binding, 5).__get__(3)(), (5, 3))
    def test_order(self):
        self.assertEqual((4 * track(5)).operations, mul(4, 5))
        self.assertEqual((track(7) / 3).operations, div(7, 3))
    def test_expression(self):
        def format_value(value):
            # Pin Decimal rendering to four places for a stable expected string.
            if isinstance(value, Decimal):
                return value.quantize(Decimal('0.0000'))
            return value
        value = 4 * track(Decimal(5.6666))
        self.assertEqual(expression(value.operations, format_value), '(4 * 5.6666)')
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--test', action='store_true', default=False, help='flag to run the tests')
    parser.add_argument('--failfast', action='store_true', default=False, help='unittest failfast')
    args = parser.parse_args()
    if args.test:
        import doctest
        # Run the unittest suite first, then the module doctests; exit
        # non-zero on any failure so callers/CI can detect it.
        suite = unittest.defaultTestLoader.loadTestsFromTestCase(TrackTestCase)
        outcome = unittest.TextTestRunner(failfast=args.failfast).run(suite)
        problems = len(outcome.errors) + len(outcome.failures)
        if problems:
            sys.exit(problems)
        doctest_failures = doctest.testmod()[0]
        if doctest_failures:
            sys.exit(doctest_failures)
|
|
# -*- coding: utf-8 -*-
#
# This file is part of Karesansui.
#
# Copyright (C) 2009-2012 HDE, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import web
from karesansui.lib.rest import Rest, auth
from karesansui.lib.pager import Pager, validates_page
from karesansui.lib.search import validates_query
from karesansui.lib.const import \
TAG_LIST_RANGE, TAG_MIN_LENGTH, TAG_MAX_LENGTH, \
ID_MIN_LENGTH, ID_MAX_LENGTH
from karesansui.lib.checker import Checker, \
CHECK_EMPTY, CHECK_VALID, \
CHECK_LENGTH, CHECK_MIN, CHECK_MAX
from karesansui.db.access.tag import \
findbyall, findby1, \
findby1name, findbyand,\
update, delete, save, new
from karesansui.lib.utils import is_param
def validates_tag(obj):
    """Validate the tag name length; collected errors go to obj.view.alert."""
    checker = Checker()
    _ = obj._
    checker.errors = []
    check = checker.check_length(
            _('Tag'),
            obj.input.name,
            TAG_MIN_LENGTH,
            TAG_MAX_LENGTH,
            )
    obj.view.alert = checker.errors
    return check
def validates_param_id(obj, tag_id):
    """Validate that tag_id is a number in the allowed ID range."""
    checker = Checker()
    _ = obj._
    checker.errors = []
    check = checker.check_number(
            _('Tag ID'),
            tag_id,
            CHECK_EMPTY | CHECK_VALID | CHECK_MIN | CHECK_MAX,
            min = ID_MIN_LENGTH,
            max = ID_MAX_LENGTH,
            )
    obj.view.alert = checker.errors
    return check
class Tag(Rest):
    """Collection resource for tags: list/search (GET) and create (POST)."""
    @auth
    def _GET(self, *param, **params):
        if not validates_query(self):
            self.logger.debug("Failed to get tags. The value of query is invalid.")
            return web.badrequest(self.view.alert)
        if not validates_page(self):
            self.logger.debug("Failed to get tags. The value of page is invalid.")
            return web.badrequest(self.view.alert)
        if is_param(self.input, 'q') is True:
            # Search mode: filter tags by the supplied query string.
            tags = findbyand(self.orm, self.input.q)
            if not tags:
                self.logger.debug("Failed to get tags. No such tag - query=%s" % self.input.q)
                return web.nocontent()
            self.view.search_value = self.input.q
        else:
            tags = findbyall(self.orm)
            self.view.search_value = ""
        if not tags:
            self.logger.debug("Failed to get tag. No tags found.")
            return web.notfound()
        start = int(self.input.p) if is_param(self.input, 'p') is True else 0
        pager = Pager(tags, start, TAG_LIST_RANGE)
        if not pager.exist_now_page():
            self.logger.debug("Failed to get tag. Could not find page - page=%s" % self.input.p)
            return web.nocontent()
        self.view.pager = pager
        if self.is_mode_input():
            self.view.tag = new('')
        self.view.input = self.input
        return True
    @auth
    def _POST(self, *param, **params):
        if not validates_tag(self):
            self.logger.debug("Failed to create tag. The value of input is invalid.")
            return web.badrequest(self.view.alert)
        # Reject a duplicate tag name.
        duplicate = findby1name(self.orm, self.input.name)
        if duplicate:
            self.logger.debug("Failed to create tag. The same tag already exist - id='%s'" % (duplicate.id))
            return web.conflict(web.ctx.path)
        save(self.orm, new(self.input.name))
        return web.created(None)
class TagBy1(Rest):
    """Single-tag resource: fetch (GET), rename (PUT) and remove (DELETE)."""
    @auth
    def _GET(self, *param, **params):
        tag_id = param[0]
        if not validates_param_id(self, tag_id):
            self.logger.debug("Failed to get tag. The value of parameter is invalid.")
            return web.badrequest(self.view.alert)
        tag = findby1(self.orm, tag_id)
        if not tag:
            self.logger.debug("Failed to get tag. No such tag - id=%s" % tag_id)
            return web.notfound()
        self.view.tag = tag
        return True
    @auth
    def _PUT(self, *param, **params):
        tag_id = param[0]
        if not validates_param_id(self, tag_id):
            self.logger.debug("Failed to update tag. The value of parameter is invalid.")
            return web.badrequest(self.view.alert)
        if not validates_tag(self):
            self.logger.debug("Failed to update tag. The value of input is invalid.")
            return web.badrequest(self.view.alert)
        tag = findby1(self.orm, tag_id)
        if not tag:
            self.logger.debug("Failed to update tag. No such tag - id=%s" % tag_id)
            return web.notfound()
        # Reject a rename that collides with a *different* existing tag.
        cmp_tag = findby1name(self.orm, self.input.name)
        if cmp_tag is not None and cmp_tag.id != tag.id:
            self.logger.debug("Failed to update tag. The same tag already exist - id='%s'" % (cmp_tag.id))
            return web.conflict(web.ctx.path)
        tag.name = self.input.name
        update(self.orm, tag)
        return web.seeother(web.ctx.path)
    @auth
    def _DELETE(self, *param, **params):
        tag_id = param[0]
        if not validates_param_id(self, tag_id):
            self.logger.debug("Failed to delete tag. The value of parameter is invalid.")
            return web.badrequest(self.view.alert)
        tag = findby1(self.orm, tag_id)
        if not tag:
            self.logger.debug("Failed to delete tag. No such tag - id=%s" % tag_id)
            return web.notfound()
        delete(self.orm, tag)
        return web.seeother("/%s.%s" % ("tag", "part"))
# URL routing table for web.py.  Raw strings avoid the invalid escape
# sequences ("\d", "\.") that are deprecated in non-raw string literals;
# the resulting pattern text is byte-identical.
urls = (
    r'/tag/(\d+)/?(\.part)$', TagBy1,
    r'/tag/?(\.part)$', Tag,
    )
|
|
'''Deep Dreaming in Keras.
Run the script with:
```
python deep_dream.py path_to_your_base_image.jpg prefix_for_results
```
e.g.:
```
python deep_dream.py img/mypic.jpg results/dream
```
It is preferable to run this script on GPU, for speed.
If running on CPU, prefer the TensorFlow backend (much faster).
Example results: http://i.imgur.com/FX6ROg9.jpg
'''
from __future__ import print_function
from scipy.misc import imread, imresize, imsave
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
import time
import argparse
import h5py
from keras.models import Sequential
from keras.layers.convolutional import Convolution2D, ZeroPadding2D, MaxPooling2D
from keras import backend as K
parser = argparse.ArgumentParser(description='Deep Dreams with Keras.')
parser.add_argument('base_image_path', metavar='base', type=str,
                    help='Path to the image to transform.')
parser.add_argument('result_prefix', metavar='res_prefix', type=str,
                    help='Prefix for the saved results.')
args = parser.parse_args()
base_image_path = args.base_image_path
result_prefix = args.result_prefix
# dimensions of the generated picture.
img_width = 600
img_height = 600
# path to the model weights file.
weights_path = 'vgg16_weights.h5'
# some settings we found interesting
# Each preset maps VGG16 layer names to their weight in the dream loss;
# 'continuity' and 'dream_l2' are regularisation strengths, and 'jitter'
# is the maximum random pixel shift applied before each iteration.
saved_settings = {
    'bad_trip': {'features': {'conv4_1': 0.05,
                              'conv4_2': 0.01,
                              'conv4_3': 0.01},
                 'continuity': 0.1,
                 'dream_l2': 0.8,
                 'jitter': 5},
    'dreamy': {'features': {'conv5_1': 0.05,
                            'conv5_2': 0.02},
               'continuity': 0.1,
               'dream_l2': 0.02,
               'jitter': 0},
}
# the settings we will use in this experiment
settings = saved_settings['bad_trip']
# util function to open, resize and format pictures into appropriate tensors
def preprocess_image(image_path):
    '''Load an image and shape it into a (1, 3, H, W) float64 batch.'''
    resized = imresize(imread(image_path), (img_width, img_height))
    # HWC -> CHW, then add the leading batch axis.
    chw = resized.transpose((2, 0, 1)).astype('float64')
    return np.expand_dims(chw, axis=0)
# util function to convert a tensor into a valid image
def deprocess_image(x):
    '''Convert a (3, H, W) float tensor into a clipped uint8 HWC image.'''
    hwc = np.transpose(x, (1, 2, 0))
    return np.clip(hwc, 0, 255).astype('uint8')
# this will contain our generated image
dream = K.placeholder((1, 3, img_width, img_height))
# build the VGG16 network with our dream as input
# (standard VGG16 topology: five convolution blocks, each ending in
# 2x2 max pooling; only the convolutional base is built here — the
# fully-connected classifier head is intentionally omitted)
first_layer = ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height))
first_layer.input = dream
model = Sequential()
model.add(first_layer)
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(64, 3, 3, activation='relu', name='conv1_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(128, 3, 3, activation='relu', name='conv2_2'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(256, 3, 3, activation='relu', name='conv3_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv4_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_1'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_2'))
model.add(ZeroPadding2D((1, 1)))
model.add(Convolution2D(512, 3, 3, activation='relu', name='conv5_3'))
model.add(MaxPooling2D((2, 2), strides=(2, 2)))
# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
# Open read-only inside a context manager so the file handle is released
# even if a layer fails to load (previously no mode was given and the
# file was only closed on the success path).
with h5py.File(weights_path, 'r') as f:
    for k in range(f.attrs['nb_layers']):
        if k >= len(model.layers):
            # we don't look at the last (fully-connected) layers in the savefile
            break
        g = f['layer_{}'.format(k)]
        weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
        model.layers[k].set_weights(weights)
print('Model loaded.')
# get the symbolic outputs of each "key" layer (we gave them unique names).
# Dict comprehension replaces the dict([(k, v) for ...]) construction.
layer_dict = {layer.name: layer for layer in model.layers}
# continuity loss util function
def continuity_loss(x):
    '''Total-variation style penalty encouraging neighbouring pixels to agree.'''
    assert K.ndim(x) == 4
    base = x[:, :, :img_width-1, :img_height-1]
    shifted_down = x[:, :, 1:, :img_height-1]
    shifted_right = x[:, :, :img_width-1, 1:]
    return K.sum(K.pow(K.square(base - shifted_down) +
                       K.square(base - shifted_right), 1.25))
# define the loss
loss = K.variable(0.)
for layer_name in settings['features']:
    # add the L2 norm of the features of a layer to the loss
    assert layer_name in layer_dict.keys(), 'Layer ' + layer_name + ' not found in model.'
    coeff = settings['features'][layer_name]
    x = layer_dict[layer_name].get_output()
    shape = layer_dict[layer_name].output_shape
    # we avoid border artifacts by only involving non-border pixels in the loss
    loss -= coeff * K.sum(K.square(x[:, :, 2: shape[2]-2, 2: shape[3]-2])) / np.prod(shape[1:])
# add continuity loss (gives image local coherence, can result in an artful blur)
loss += settings['continuity'] * continuity_loss(dream) / (3 * img_width * img_height)
# add image L2 norm to loss (prevents pixels from taking very high values, makes image darker)
loss += settings['dream_l2'] * K.sum(K.square(dream)) / (3 * img_width * img_height)
# feel free to further modify the loss as you see fit, to achieve new effects...
# compute the gradients of the dream wrt the loss
grads = K.gradients(loss, dream)
outputs = [loss]
# K.gradients may return a single tensor or a list depending on the
# backend; normalise to one flat outputs list either way.
if type(grads) in {list, tuple}:
    outputs += grads
else:
    outputs.append(grads)
f_outputs = K.function([dream], outputs)
def eval_loss_and_grads(x):
    """Evaluate loss and gradient for a flat pixel vector in one backend pass."""
    outs = f_outputs([x.reshape((1, 3, img_width, img_height))])
    loss_value = outs[0]
    grad_list = outs[1:]
    # The backend may return the gradient as one array or as several pieces.
    if len(grad_list) == 1:
        grad_values = grad_list[0].flatten().astype('float64')
    else:
        grad_values = np.array(grad_list).flatten().astype('float64')
    return loss_value, grad_values
# this Evaluator class makes it possible
# to compute loss and gradients in one pass
# while retrieving them via two separate functions,
# "loss" and "grads". This is done because scipy.optimize
# requires separate functions for loss and gradients,
# but computing them separately would be inefficient.
class Evaluator(object):
    """Caches loss and gradients so scipy.optimize can query them separately.

    ``fmin_l_bfgs_b`` wants two callables (loss and its gradient), but
    computing them together in a single pass is cheaper.  ``loss`` runs the
    combined evaluation and stashes the gradient; ``grads`` returns and
    clears the stash.  ``loss`` must be called before ``grads`` for each
    point (enforced by the asserts).
    """

    def __init__(self):
        self.loss_value = None
        # Fixed: was `self.grads_values` (note the extra 's'), which never
        # matched the `grad_values` attribute read/written by loss()/grads().
        self.grad_values = None

    def loss(self, x):
        """Evaluate the loss at x and cache the gradient for grads()."""
        assert self.loss_value is None
        loss_value, grad_values = eval_loss_and_grads(x)
        self.loss_value = loss_value
        self.grad_values = grad_values
        return self.loss_value

    def grads(self, x):
        """Return the gradient cached by the matching loss() call."""
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values
# run scipy-based optimization (L-BFGS) over the pixels of the generated image
# so as to minimize the loss
x = preprocess_image(base_image_path)
for i in range(10):
    print('Start of iteration', i)
    start_time = time.time()
    # add a random jitter to the initial image. This will be reverted at decoding time
    random_jitter = (settings['jitter'] * 2) * (np.random.random((3, img_width, img_height)) - 0.5)
    x += random_jitter
    # run L-BFGS for 7 steps
    x, min_val, info = fmin_l_bfgs_b(evaluator.loss, x.flatten(),
                                     fprime=evaluator.grads, maxfun=7)
    print('Current loss value:', min_val)
    # decode the dream and save it
    x = x.reshape((3, img_width, img_height))
    x -= random_jitter
    img = deprocess_image(x)
    # One snapshot per iteration so the progression can be inspected.
    fname = result_prefix + '_at_iteration_%d.png' % i
    imsave(fname, img)
    end_time = time.time()
    print('Image saved as', fname)
    print('Iteration %d completed in %ds' % (i, end_time - start_time))
|
|
#!/usr/bin/env python2
"""Command line utility for querying the Logitech Harmony."""
import argparse
import logging
import json
import sys
from harmony import auth
from harmony import client as harmony_client
LOGGER = logging.getLogger(__name__)
def login_to_logitech(args):
    """Logs in to the Logitech service.

    Args:
        args: argparse arguments needed to login.

    Returns:
        Session token that can be used to log in to the Harmony device.
    """
    # First authenticate against the Logitech web service...
    login_token = auth.login(args.email, args.password)
    if not login_token:
        sys.exit('Could not get token from Logitech server.')
    # ...then trade that token for a device-local session token.
    session_token = auth.swap_auth_token(
        args.harmony_ip, args.harmony_port, login_token)
    if not session_token:
        sys.exit('Could not swap login token for session token.')
    return session_token
def login_to_logitech_site(email, password, harmony_ip, harmony_port):
    """Log in with explicit credentials and return a device session token.

    Library-friendly variant of login_to_logitech that does not need an
    argparse namespace.
    """
    login_token = auth.login(email, password)
    if not login_token:
        sys.exit('Could not get token from Logitech server.')
    session_token = auth.swap_auth_token(harmony_ip, harmony_port, login_token)
    if not session_token:
        sys.exit('Could not swap login token for session token.')
    return session_token
def pprint(obj):
    """Pretty JSON dump of an object (key-sorted, 4-space indented)."""
    rendered = json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
    print(rendered)
def get_client(args):
    """Connect to the Harmony and return a Client instance."""
    session_token = login_to_logitech(args)
    return harmony_client.create_and_connect_client(
        args.harmony_ip, args.harmony_port, session_token)
def get_client_obj(email, password, harmony_ip, harmony_port):
    """Connect to the Harmony and return a Client instance.

    Library-friendly variant of get_client taking explicit credentials.

    Bug fix: login_to_logitech_site requires four arguments; harmony_ip and
    harmony_port were previously omitted, so this raised TypeError before a
    connection was ever attempted.
    """
    token = login_to_logitech_site(email, password, harmony_ip, harmony_port)
    client = harmony_client.create_and_connect_client(harmony_ip, harmony_port, token)
    return client
def show_config(args):
    """Connects to the Harmony and prints its configuration."""
    harmony = get_client(args)
    pprint(harmony.get_config())
    harmony.disconnect(send_close=True)
    return 0
def show_configuration(email, password, harmony_ip, harmony_port=5222):
    """Connect with explicit credentials and print the Harmony configuration.

    Returns 0 on success, consistent with the other command handlers
    (previously returned False, which is numerically equal but inconsistent
    as an exit status).
    """
    client = get_client_obj(email, password, harmony_ip, harmony_port)
    pprint(client.get_config())
    client.disconnect(send_close=True)
    return 0
def show_current_activity(args):
    """Connects to the Harmony and prints the current activity block
    from the config."""
    client = get_client(args)
    config = client.get_config()
    current_activity_id = client.get_current_activity()
    # List-indexing semantics preserved: an unknown id raises IndexError.
    matching = [entry for entry in config['activity']
                if int(entry['id']) == current_activity_id]
    pprint(matching[0])
    client.disconnect(send_close=True)
    return 0
def sync(args):
    """Connects to the Harmony and syncs it.
    """
    harmony = get_client(args)
    harmony.sync()
    harmony.disconnect(send_close=True)
    return 0
def turn_off(args):
    """Sends a 'turn off' command to the harmony, which is the activity
    '-1'.

    Fix: propagate start_activity's exit code instead of discarding it, so
    a failed turn-off is reported through sys.exit in main().
    """
    args.activity = '-1'
    return start_activity(args)
def start_activity(args):
    """Connects to the Harmony and switches to a different activity,
    specified as an id or label.

    Returns 0 on success, 1 if the activity could not be found.
    """
    client = get_client(args)
    config = client.get_config()
    activity_off = False
    activity_numeric = False
    activity_id = None
    activity_label = None
    try:
        # Numeric argument: treat it as an activity id ('-1' means off).
        activity_off = float(args.activity) == -1
        activity_id = int(float(args.activity))
        activity_numeric = True
    except ValueError:
        # Non-numeric argument: match by label.
        activity_off = args.activity.lower() == 'turn off'
        activity_label = str(args.activity)
    if activity_off:
        activity = [{'id': -1, 'label': 'Turn Off'}]
    else:
        # Bug fix: guard the label comparison. activity_label is None when a
        # numeric id was supplied, and evaluating None.lower() raised
        # AttributeError as soon as an entry's id did not match.
        activity = [x for x in config['activity']
                    if (activity_numeric and int(x['id']) == activity_id)
                    or (activity_label is not None
                        and x['label'].lower() == activity_label.lower())
                    ]
    if not activity:
        LOGGER.error('could not find activity: ' + args.activity)
        client.disconnect(send_close=True)
        return 1
    activity = activity[0]
    client.start_activity(int(activity['id']))
    LOGGER.info("started activity: '%s' of id: '%s'" % (activity['label'], activity['id']))
    client.disconnect(send_close=True)
    return 0
def send_command(args):
    """Connects to the Harmony and send a simple command."""
    client = get_client(args)
    config = client.get_config()
    # --device_id takes precedence over --device when both could be present.
    device = args.device if args.device_id is None else args.device_id
    device_numeric = None
    try:
        device_numeric = int(float(device))
    except ValueError:
        # Not a number: we will only match by label below.
        pass
    matches = [dev for dev in config['device']
               if device.lower() == dev['label'].lower()
               or ((device_numeric is not None) and device_numeric == int(dev['id']))]
    if not matches:
        LOGGER.error('could not find device: ' + device)
        client.disconnect(send_close=True)
        return 1
    client.send_command(int(matches[0]['id']), args.command)
    client.disconnect(send_close=True)
    return 0
def main():
    """Main method for the script."""
    parser = argparse.ArgumentParser(
        description='pyharmony utility script',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Required flags go here.
    required_flags = parser.add_argument_group('required arguments')
    required_flags.add_argument('--email', required=True, help=(
        'Logitech username in the form of an email address.'))
    required_flags.add_argument(
        '--password', required=True, help='Logitech password.')
    required_flags.add_argument(
        '--harmony_ip', required=True, help='IP Address of the Harmony device.')
    # Flags with defaults go here.
    parser.add_argument('--harmony_port', default=5222, type=int, help=(
        'Network port that the Harmony is listening on.'))
    # Map level names ('DEBUG'..'CRITICAL') to their numeric values 10..50.
    loglevels = dict((logging.getLevelName(level), level)
                     for level in [10, 20, 30, 40, 50])
    parser.add_argument('--loglevel', default='INFO', choices=loglevels.keys(),
                        help='Logging level to print to the console.')
    # One subcommand per operation; each sets `func` to its handler.
    subparsers = parser.add_subparsers()
    show_config_parser = subparsers.add_parser(
        'show_config', help='Print the Harmony device configuration.')
    show_config_parser.set_defaults(func=show_config)
    show_activity_parser = subparsers.add_parser(
        'show_current_activity', help='Print the current activity config.')
    show_activity_parser.set_defaults(func=show_current_activity)
    start_activity_parser = subparsers.add_parser(
        'start_activity', help='Switch to a different activity.')
    start_activity_parser.add_argument(
        'activity', help='Activity to switch to, id or label.')
    start_activity_parser.set_defaults(func=start_activity)
    sync_parser = subparsers.add_parser(
        'sync', help='Sync the harmony.')
    sync_parser.set_defaults(func=sync)
    turn_off_parser = subparsers.add_parser(
        'turn_off', help='Send a turn off command to the harmony.')
    turn_off_parser.set_defaults(func=turn_off)
    command_parser = subparsers.add_parser(
        'send_command', help='Send a simple command.')
    command_parser.add_argument('--command',
        help='IR Command to send to the device.', required=True)
    # Exactly one of --device_id / --device must be given.
    device_arg_group = command_parser.add_mutually_exclusive_group(required=True)
    device_arg_group.add_argument('--device_id',
        help='Specify the device id to which we will send the command.')
    device_arg_group.add_argument('--device',
        help='Specify the device id or label to which we will send the command.')
    command_parser.set_defaults(func=send_command)
    args = parser.parse_args()
    logging.basicConfig(
        level=loglevels[args.loglevel],
        format='%(levelname)s:\t%(name)s\t%(message)s')
    # Handler return value becomes the process exit status.
    sys.exit(args.func(args))
# Script entry point.
if __name__ == '__main__':
    main()
|
|
from abc import ABC
from typing import Any
from toil.job import Promise
class WDLRuntimeError(RuntimeError):
    """Raised when a WDL value cannot be created/coerced at runtime."""
    pass
class WDLType:
    """
    Represents a primitive or compound WDL type:
    https://github.com/openwdl/wdl/blob/main/versions/development/SPEC.md#types
    """
    def __init__(self, optional: bool = False):
        # True when the WDL declaration is optional (e.g. `String?`).
        self.optional = optional

    @property
    def name(self) -> str:
        """
        Type name as string. Used in display messages / 'mappings.out' if dev
        mode is enabled.
        """
        raise NotImplementedError

    @property
    def default_value(self):
        """
        Default value if optional.
        """
        return None

    def create(self, value: Any, output: bool = False) -> Any:
        """
        Calls at runtime. Returns an instance of the current type. An error may
        be raised if the value is not in the correct format.

        :param value: a Python object
        :param output: unused here; kept for subclass/caller compatibility.
        :raises WDLRuntimeError: if value is None and the type is not optional.
        """
        if value is None:
            # check if input is in fact an optional.
            if self.optional:
                return self.default_value
            else:
                raise WDLRuntimeError(f"Required input for '{self.name}' type not specified.")
        if isinstance(value, Promise):
            # Defer coercion until the promise has been resolved.
            return value
        return self._create(value)

    def _create(self, value: Any) -> Any:
        # Subclasses implement the actual coercion.
        raise NotImplementedError

    def __eq__(self, other):
        # Types compare equal to their name string; this is how the rest of
        # the code matches them.
        return self.name.__eq__(other)

    def __hash__(self):
        # Fix: defining __eq__ without __hash__ implicitly set __hash__ to
        # None, making every WDLType unhashable in Python 3. Hash by name to
        # stay consistent with __eq__.
        return hash(self.name)

    def __str__(self):
        return self.name.__str__()

    def __repr__(self):
        return self.name.__repr__()
class WDLCompoundType(WDLType, ABC):
    """
    Represents a WDL compound type.

    Marker base class for container types (Array, Pair, Map); carries no
    behavior of its own.
    """
    pass
class WDLStringType(WDLType):
    """WDL ``String`` primitive type."""

    @property
    def name(self) -> str:
        return 'String'

    @property
    def default_value(self):
        # Optional strings default to the empty string.
        return ''

    def _create(self, value: Any) -> Any:
        # Any Python value is accepted and stringified.
        return str(value)
class WDLIntType(WDLType):
    """WDL ``Int`` primitive type."""

    @property
    def name(self) -> str:
        return 'Int'

    def _create(self, value: Any) -> Any:
        # int() raises ValueError/TypeError for non-coercible input.
        return int(value)
class WDLFloatType(WDLType):
    """WDL ``Float`` primitive type."""

    @property
    def name(self) -> str:
        return 'Float'

    def _create(self, value: Any) -> Any:
        # float() raises ValueError/TypeError for non-coercible input.
        return float(value)
class WDLBooleanType(WDLType):
    """WDL ``Boolean`` primitive type."""

    @property
    def name(self) -> str:
        return 'Boolean'

    def _create(self, value: Any) -> Any:
        # Python truthiness of the input decides the boolean.
        return bool(value)
class WDLFileType(WDLType):
    """WDL ``File`` primitive type."""

    @property
    def name(self) -> str:
        return 'File'

    @property
    def default_value(self):
        # Optional files default to an empty path.
        return ''

    def _create(self, value: Any) -> Any:
        # A file handed from task to task (or still promised) passes through
        # untouched; anything else is treated as a path and wrapped in a
        # not-yet-imported WDLFile.
        if isinstance(value, (WDLFile, Promise)):
            return value
        return WDLFile(file_path=value, imported=False)
class WDLArrayType(WDLCompoundType):
    """WDL ``Array`` compound type."""

    def __init__(self, element: WDLType, optional: bool = False):
        super().__init__(optional)
        # WDL type shared by every member of the array.
        self.element = element

    @property
    def name(self) -> str:
        return f'Array[{self.element.name}]'

    def _create(self, value: Any) -> Any:
        if not isinstance(value, list):
            raise WDLRuntimeError(f"Expected an array input for Array, but got '{type(value)}'")
        # Coerce each member through the element type.
        return [self.element.create(member) for member in value]
class WDLPairType(WDLCompoundType):
    """WDL ``Pair`` compound type."""

    def __init__(self, left: WDLType, right: WDLType, optional: bool = False):
        super().__init__(optional)
        self.left = left
        self.right = right

    @property
    def name(self) -> str:
        return f'Pair[{self.left.name}, {self.right.name}]'

    def _create(self, value: Any) -> Any:
        # Accept an existing WDLPair, a 2-tuple, or a {'left', 'right'} dict.
        if isinstance(value, WDLPair):
            return value
        if isinstance(value, tuple):
            if len(value) != 2:
                raise WDLRuntimeError('Only support Pair len == 2')
            left_val, right_val = value
        elif isinstance(value, dict):
            if 'left' not in value or 'right' not in value:
                raise WDLRuntimeError('Pair needs \'left\' and \'right\' keys')
            left_val = value.get('left')
            right_val = value.get('right')
        else:
            raise WDLRuntimeError(f"Expected a pair input for Pair, but got '{type(value)}'")
        # Each side is coerced through its declared type.
        return WDLPair(self.left.create(left_val), self.right.create(right_val))
class WDLMapType(WDLCompoundType):
    """WDL ``Map`` compound type."""

    def __init__(self, key: WDLType, value: WDLType, optional: bool = False):
        super().__init__(optional)
        # WDL types of the map's keys and values respectively.
        self.key = key
        self.value = value

    @property
    def name(self) -> str:
        return f'Map[{self.key.name}, {self.value.name}]'

    def _create(self, value: Any) -> Any:
        if not isinstance(value, dict):
            raise WDLRuntimeError(f"Expected a map input for Map, but got '{type(value)}'")
        # Coerce every key and value through their declared types.
        return {self.key.create(k): self.value.create(v) for k, v in value.items()}
class WDLFile:
    """
    Represents a WDL File.
    """
    def __init__(self, file_path, file_name=None, imported=False):
        """
        :param file_path: Path to file.
        :param file_name: Optional. Preserved file name.
        :param imported: If True, this file has been imported to the fileStore
                         via fileStore.importFile().
        """
        # Plain value holder; no validation is performed on the path here.
        self.file_path = file_path
        self.file_name = file_name
        self.imported = imported
class WDLPair:
    """
    Represents a WDL Pair literal defined at
    https://github.com/openwdl/wdl/blob/main/versions/development/SPEC.md#pair-literals
    """

    def __init__(self, left: Any, right: Any):
        self.left = left
        self.right = right

    def to_dict(self):
        """Return the pair as a ``{'left': ..., 'right': ...}`` dict."""
        return {'left': self.left, 'right': self.right}

    def __eq__(self, other):
        # Equal only to another WDLPair with equal members.
        return (isinstance(other, WDLPair)
                and self.left == other.left
                and self.right == other.right)

    def __repr__(self):
        return str(self.to_dict())
|
|
"""Test the Home Assistant local auth provider."""
import asyncio
from unittest.mock import Mock, patch
import pytest
import voluptuous as vol
from homeassistant import data_entry_flow
from homeassistant.auth import auth_manager_from_config, auth_store
from homeassistant.auth.providers import (
auth_provider_from_config,
homeassistant as hass_auth,
)
from tests.common import mock_coro
@pytest.fixture
def data(hass):
    """Create a loaded data class."""
    # Load (or initialize) the auth storage before handing it to the test.
    data = hass_auth.Data(hass)
    hass.loop.run_until_complete(data.async_load())
    return data
@pytest.fixture
def legacy_data(hass):
    """Create a loaded legacy data class."""
    data = hass_auth.Data(hass)
    hass.loop.run_until_complete(data.async_load())
    # Legacy mode: usernames are matched exactly (no normalization).
    data.is_legacy = True
    return data
async def test_validating_password_invalid_user(data, hass):
    """Test validating an invalid user."""
    # An unknown username must raise InvalidAuth, not InvalidUser, so the
    # caller cannot distinguish bad user from bad password.
    with pytest.raises(hass_auth.InvalidAuth):
        data.validate_login("non-existing", "pw")
async def test_not_allow_set_id():
    """Test we are not allowed to set an ID in config."""
    hass = Mock()
    # The provider config schema must reject an explicit "id" key.
    with pytest.raises(vol.Invalid):
        await auth_provider_from_config(
            hass, None, {"type": "homeassistant", "id": "invalid"}
        )
async def test_new_users_populate_values(hass, data):
    """Test that we populate data for new users."""
    data.add_auth("hello", "test-pass")
    await data.async_save()
    manager = await auth_manager_from_config(hass, [{"type": "homeassistant"}], [])
    provider = manager.auth_providers[0]
    credentials = await provider.async_get_or_create_credentials({"username": "hello"})
    user = await manager.async_get_or_create_user(credentials)
    # The new user inherits the username as display name and is active.
    assert user.name == "hello"
    assert user.is_active
async def test_changing_password_raises_invalid_user(data, hass):
    """Test that changing password raises invalid user."""
    with pytest.raises(hass_auth.InvalidUser):
        data.change_password("non-existing", "pw")
# Modern mode
async def test_adding_user(data, hass):
    """Test adding a user."""
    data.add_auth("test-user", "test-pass")
    # Modern mode normalizes usernames, so surrounding whitespace is ignored.
    data.validate_login(" test-user ", "test-pass")
async def test_adding_user_duplicate_username(data, hass):
    """Test adding a user with duplicate username."""
    data.add_auth("test-user", "test-pass")
    # Case- and whitespace-insensitive duplicate must be rejected.
    with pytest.raises(hass_auth.InvalidUser):
        data.add_auth("TEST-user ", "other-pass")
async def test_validating_password_invalid_password(data, hass):
    """Test validating an invalid password."""
    data.add_auth("test-user", "test-pass")
    with pytest.raises(hass_auth.InvalidAuth):
        data.validate_login(" test-user ", "invalid-pass")
    # Unlike usernames, passwords are NOT normalized: whitespace matters...
    with pytest.raises(hass_auth.InvalidAuth):
        data.validate_login("test-user", "test-pass ")
    # ...and so does case.
    with pytest.raises(hass_auth.InvalidAuth):
        data.validate_login("test-user", "Test-pass")
async def test_changing_password(data, hass):
    """Test changing a password invalidates the old one."""
    data.add_auth("test-user", "test-pass")
    # Username normalization applies to change_password too.
    data.change_password("TEST-USER ", "new-pass")
    with pytest.raises(hass_auth.InvalidAuth):
        data.validate_login("test-user", "test-pass")
    data.validate_login("test-UsEr", "new-pass")
async def test_login_flow_validates(data, hass):
    """Test login flow."""
    data.add_auth("test-user", "test-pass")
    await data.async_save()
    provider = hass_auth.HassAuthProvider(
        hass, auth_store.AuthStore(hass), {"type": "homeassistant"}
    )
    flow = await provider.async_login_flow({})
    # First step with no input shows the form.
    result = await flow.async_step_init()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Wrong username and wrong password both produce the same generic error.
    result = await flow.async_step_init(
        {"username": "incorrect-user", "password": "test-pass"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"]["base"] == "invalid_auth"
    result = await flow.async_step_init(
        {"username": "TEST-user ", "password": "incorrect-pass"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"]["base"] == "invalid_auth"
    # Valid credentials finish the flow; the entered username is preserved
    # verbatim in the result data (normalization happens at validation).
    result = await flow.async_step_init(
        {"username": "test-USER", "password": "test-pass"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["username"] == "test-USER"
async def test_saving_loading(data, hass):
    """Test saving and loading JSON."""
    data.add_auth("test-user", "test-pass")
    data.add_auth("second-user", "second-pass")
    await data.async_save()
    # Reload from storage and verify both users survived the round trip.
    data = hass_auth.Data(hass)
    await data.async_load()
    data.validate_login("test-user ", "test-pass")
    data.validate_login("second-user ", "second-pass")
async def test_get_or_create_credentials(hass, data):
    """Test that we can get or create credentials."""
    manager = await auth_manager_from_config(hass, [{"type": "homeassistant"}], [])
    provider = manager.auth_providers[0]
    provider.data = data
    credentials1 = await provider.async_get_or_create_credentials({"username": "hello"})
    with patch.object(
        provider, "async_credentials", return_value=mock_coro([credentials1])
    ):
        # Normalized usernames ("hello " -> "hello") map to the SAME
        # credentials object in modern mode.
        credentials2 = await provider.async_get_or_create_credentials(
            {"username": "hello "}
        )
    assert credentials1 is credentials2
# Legacy mode
async def test_legacy_adding_user(legacy_data, hass):
    """Test in legacy mode adding a user."""
    legacy_data.add_auth("test-user", "test-pass")
    legacy_data.validate_login("test-user", "test-pass")
async def test_legacy_adding_user_duplicate_username(legacy_data, hass):
    """Test in legacy mode adding a user with duplicate username."""
    legacy_data.add_auth("test-user", "test-pass")
    with pytest.raises(hass_auth.InvalidUser):
        legacy_data.add_auth("test-user", "other-pass")
    # Not considered duplicate: legacy matching is exact, so whitespace or
    # case differences create distinct users.
    legacy_data.add_auth("test-user ", "test-pass")
    legacy_data.add_auth("Test-user", "test-pass")
async def test_legacy_validating_password_invalid_password(legacy_data, hass):
    """Test in legacy mode validating an invalid password."""
    legacy_data.add_auth("test-user", "test-pass")
    with pytest.raises(hass_auth.InvalidAuth):
        legacy_data.validate_login("test-user", "invalid-pass")
async def test_legacy_changing_password(legacy_data, hass):
    """Test in legacy mode changing a password invalidates the old one."""
    user = "test-user"
    legacy_data.add_auth(user, "test-pass")
    legacy_data.change_password(user, "new-pass")
    with pytest.raises(hass_auth.InvalidAuth):
        legacy_data.validate_login(user, "test-pass")
    legacy_data.validate_login(user, "new-pass")
async def test_legacy_changing_password_raises_invalid_user(legacy_data, hass):
    """Test in legacy mode changing an unknown user's password raises."""
    with pytest.raises(hass_auth.InvalidUser):
        legacy_data.change_password("non-existing", "pw")
async def test_legacy_login_flow_validates(legacy_data, hass):
    """Test in legacy mode login flow."""
    legacy_data.add_auth("test-user", "test-pass")
    await legacy_data.async_save()
    provider = hass_auth.HassAuthProvider(
        hass, auth_store.AuthStore(hass), {"type": "homeassistant"}
    )
    flow = await provider.async_login_flow({})
    # No input -> show the form.
    result = await flow.async_step_init()
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    # Wrong username and wrong password both produce the same generic error.
    result = await flow.async_step_init(
        {"username": "incorrect-user", "password": "test-pass"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"]["base"] == "invalid_auth"
    result = await flow.async_step_init(
        {"username": "test-user", "password": "incorrect-pass"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
    assert result["errors"]["base"] == "invalid_auth"
    # Exact credentials succeed.
    result = await flow.async_step_init(
        {"username": "test-user", "password": "test-pass"}
    )
    assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    assert result["data"]["username"] == "test-user"
async def test_legacy_saving_loading(legacy_data, hass):
    """Test in legacy mode saving and loading JSON."""
    legacy_data.add_auth("test-user", "test-pass")
    legacy_data.add_auth("second-user", "second-pass")
    await legacy_data.async_save()
    # Reload from storage, forcing legacy mode again on the fresh object.
    legacy_data = hass_auth.Data(hass)
    await legacy_data.async_load()
    legacy_data.is_legacy = True
    legacy_data.validate_login("test-user", "test-pass")
    legacy_data.validate_login("second-user", "second-pass")
    # Exact matching: trailing whitespace is a different (unknown) username.
    with pytest.raises(hass_auth.InvalidAuth):
        legacy_data.validate_login("test-user ", "test-pass")
async def test_legacy_get_or_create_credentials(hass, legacy_data):
    """Test in legacy mode that we can get or create credentials."""
    manager = await auth_manager_from_config(hass, [{"type": "homeassistant"}], [])
    provider = manager.auth_providers[0]
    provider.data = legacy_data
    credentials1 = await provider.async_get_or_create_credentials({"username": "hello"})
    with patch.object(
        provider, "async_credentials", return_value=mock_coro([credentials1])
    ):
        # Identical username -> same credentials object.
        credentials2 = await provider.async_get_or_create_credentials(
            {"username": "hello"}
        )
    assert credentials1 is credentials2
    with patch.object(
        provider, "async_credentials", return_value=mock_coro([credentials1])
    ):
        # Legacy mode does not normalize, so "hello " is a new identity.
        credentials3 = await provider.async_get_or_create_credentials(
            {"username": "hello "}
        )
    assert credentials1 is not credentials3
async def test_race_condition_in_data_loading(hass):
    """Test race condition in the hass_auth.Data loading.

    Ref issue: https://github.com/home-assistant/home-assistant/issues/21569
    """
    counter = 0

    async def mock_load(_):
        """Mock of homeassistant.helpers.storage.Store.async_load."""
        nonlocal counter
        counter += 1
        # Yield control so both validate_login tasks can interleave.
        await asyncio.sleep(0)

    provider = hass_auth.HassAuthProvider(
        hass, auth_store.AuthStore(hass), {"type": "homeassistant"}
    )
    with patch("homeassistant.helpers.storage.Store.async_load", new=mock_load):
        # Two concurrent logins must share a single storage load.
        task1 = provider.async_validate_login("user", "pass")
        task2 = provider.async_validate_login("user", "pass")
        results = await asyncio.gather(task1, task2, return_exceptions=True)
        assert counter == 1
        assert isinstance(results[0], hass_auth.InvalidAuth)
        # results[1] will be a TypeError if race condition occurred
        assert isinstance(results[1], hass_auth.InvalidAuth)
|
|
import json
import inspect
import numpy as np
import pandas as pd
from sklearn.preprocessing import binarize
from abc import abstractmethod, ABC, ABCMeta
# imports from gpmap dependency
from gpmap.gpm import GenotypePhenotypeMap
# Local imports
from epistasis.mapping import EpistasisMap, encoding_to_sites
from epistasis.matrix import get_model_matrix
from epistasis.utils import (extract_mutations_from_genotypes,
genotypes_to_X)
from .utils import XMatrixException
from sklearn.base import RegressorMixin, BaseEstimator
class SubclassException(Exception):
    """Raised when an abstract model method is called on the parent class
    instead of being overridden in a concrete subclass."""
def use_sklearn(sklearn_class):
    """Swap out last class in the inherited stack (Assuming its
    the BaseModel) with the AbstractModel below. Then, sandwiches
    the Sklearn class with all other base classes first, followed
    by the Sklearn class and the AbstractModel.

    Used as a class decorator factory: ``@use_sklearn(LinearRegression)``.
    """
    def mixer(cls):
        # Meta program the class
        bases = cls.__bases__[:-1]  # drop the trailing base (assumed BaseModel)
        name = cls.__name__
        methods = dict(cls.__dict__)
        # Put Sklearn first in line of parent classes
        # NOTE(review): sklearn_class actually comes AFTER the retained bases
        # in this tuple, not first — confirm this matches the intended MRO.
        parents = bases + (sklearn_class, AbstractModel)
        # Rebuild class with Mixed in scikit learn.
        cls = type(name, parents, methods)
        return cls
    return mixer
class AbstractModel(ABC):
"""Abstract Base Class for all epistasis models.
This class sets all docstrings not given in subclasses.
"""
def __new__(self, *args, **kwargs):
    """Replace the docstrings of a subclass with docstrings in
    this base class.

    NOTE(review): the first parameter is conventionally named ``cls`` for
    ``__new__``; here ``self`` is in fact the class object being created.
    """
    # Get items in BaseModel.
    for name, member in inspect.getmembers(AbstractModel):
        # Get the docstring for this item
        doc = getattr(member, '__doc__')
        # Replace the docstring in self with basemodel docstring.
        try:
            member = getattr(self, name)
            member.__doc__ = doc
        except AttributeError:
            # Not every AbstractModel attribute exists (or is writable) on
            # the subclass; skip those silently.
            pass
    return super(AbstractModel, self).__new__(self)
# --------------------------------------------------------------
# Abstract Properties
# --------------------------------------------------------------
@property
@abstractmethod
def num_of_params(self):
    """Number of parameters in model.

    Abstract property; concrete epistasis models must override it.
    """
    raise SubclassException("Must be implemented in a subclass.")
# --------------------------------------------------------------
# Abstract Methods
# --------------------------------------------------------------
@abstractmethod
def fit(self, X=None, y=None, **kwargs):
    """Fit model to data.

    Abstract; concrete models must override.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    y : None or ndarray (default=None)
        array of phenotypes. If None, the phenotypes in the attached
        genotype-phenotype map is used.

    Returns
    -------
    self :
        The model is returned. Allows chaining methods.
    """
    raise SubclassException("Must be implemented in a subclass.")
@abstractmethod
def fit_transform(self, X=None, y=None, **kwargs):
    """Fit model to data and transform output according to model.

    Abstract; concrete models must override.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    y : None or ndarray (default=None)
        array of phenotypes. If None, the phenotypes in the attached
        genotype-phenotype map is used.

    Returns
    -------
    gpm : GenotypePhenotypeMap
        The genotype-phenotype map object with transformed genotypes.
    """
    raise SubclassException("Must be implemented in a subclass.")
@abstractmethod
def predict(self, X=None):
    """Use model to predict phenotypes for a given list of genotypes.

    Abstract; concrete models must override.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.

    Returns
    -------
    y : ndarray
        array of phenotypes.
    """
    raise SubclassException("Must be implemented in a subclass.")
def predict_to_df(self, X=None):
    """Predict a list of genotypes and write the results to a dataframe."""
    # ------- Handle X --------------
    # Normalize X into an iterable of genotype strings before predicting.
    x_class = X.__class__
    if X is None:
        # Default to the genotypes of the attached genotype-phenotype map.
        X = self.gpm.genotypes
    elif x_class is str and X[0] in self.gpm.mutations[0]:
        # A single genotype string: wrap it in a one-element list.
        X = [X]
    elif x_class is np.ndarray and X.ndim == 2:
        # A 2-D ndarray is an X matrix, not a genotype list.
        raise Exception("X must be a list of genotypes")
    elif x_class in [list, np.ndarray, pd.DataFrame, pd.Series]:
        pass
    else:
        raise Exception("X must be a list of genotypes.")
    # -------- Predict ---------------
    predictions = self.predict(X=X)
    return pd.DataFrame(dict(
        genotypes=X,
        phenotypes=predictions
    ))
def predict_to_csv(self, filename, X=None):
    """Predict a list of genotypes and write the results to a CSV."""
    # Build the predictions dataframe, then persist without the index column.
    self.predict_to_df(X=X).to_csv(filename, index=False)
def predict_to_excel(self, filename, X=None):
    """Predict a list of genotypes and write the results to an Excel file."""
    # Build the predictions dataframe, then persist without the index column.
    self.predict_to_df(X=X).to_excel(filename, index=False)
@abstractmethod
def predict_transform(self, X=None, y=None, **kwargs):
    """Transform a set of phenotypes according to the model.

    Abstract; concrete models must override.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    y : ndarray
        An array of phenotypes to transform.

    Returns
    -------
    y_transform : ndarray
        array of phenotypes.
    """
    raise SubclassException("Must be implemented in a subclass.")
@abstractmethod
def hypothesis(self, X=None, thetas=None):
    """Compute phenotypes from given model parameters.

    Abstract; concrete models must override.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    thetas : ndarray
        array of model parameters. See thetas property for specifics.

    Returns
    -------
    y : ndarray
        array of phenotypes predicted by model parameters.
    """
    raise SubclassException("Must be implemented in a subclass.")
@abstractmethod
def hypothesis_transform(self, X=None, y=None, thetas=None):
    """Transform phenotypes with given model parameters.

    Abstract; concrete models must override.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    y : ndarray
        An array of phenotypes to transform.
    thetas : ndarray
        array of model parameters. See thetas property for specifics.

    Returns
    -------
    y : ndarray
        array of phenotypes predicted by model parameters.
    """
    raise SubclassException("Must be implemented in a subclass.")
@abstractmethod
def lnlike_of_data(
        self,
        X=None,
        y=None,
        yerr=None,
        thetas=None):
    """Compute the individual log-likelihoods for each datapoint from a set
    of model parameters.

    Abstract; concrete models must override.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    y : ndarray
        An array of phenotypes to transform.
    yerr : ndarray
        An array of the measured phenotypes standard deviations.
    thetas : ndarray
        array of model parameters. See thetas property for specifics.

    Returns
    -------
    y : ndarray
        array of phenotypes predicted by model parameters.
    """
    raise SubclassException("Must be implemented in a subclass.")
@abstractmethod
def lnlike_transform(
        self,
        X=None,
        y=None,
        yerr=None,
        lnprior=None,
        thetas=None):
    """Compute the individual log-likelihoods for each datapoint from a set
    of model parameters and a prior.

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    y : ndarray
        An array of phenotypes to transform.
    yerr : ndarray
        An array of the measured phenotypes standard deviations.
    lnprior : ndarray
        An array of priors for a given datapoint.
    thetas : ndarray
        array of model parameters. See thetas property for specifics.

    Returns
    -------
    lnlike : ndarray
        array of log-likelihoods (including the prior), one per datapoint.
    """
    raise SubclassException("Must be implemented in a subclass.")
def lnlikelihood(
        self,
        X=None,
        y=None,
        yerr=None,
        thetas=None):
    """Compute the total log-likelihood of the data given a set of model
    parameters (the sum of the per-datapoint log-likelihoods from
    ``lnlike_of_data``).

    Parameters
    ----------
    X : None, ndarray, or list of genotypes. (default=None)
        data used to construct X matrix that maps genotypes to
        model coefficients. If None, the model uses genotypes in the
        attached genotype-phenotype map. If a list of strings,
        the strings are genotypes that will be converted to an X matrix.
        If ndarray, the function assumes X is the X matrix used by the
        epistasis model.
    y : ndarray
        An array of phenotypes to transform.
    yerr : ndarray
        An array of the measured phenotypes standard deviations.
    thetas : ndarray
        array of model parameters. See thetas property for specifics.

    Returns
    -------
    lnlike : float
        log-likelihood of the model parameters; ``-numpy.inf`` when the
        summed likelihood is infinite or NaN.
    """
    lnlike = np.sum(self.lnlike_of_data(X=X, y=y, yerr=yerr, thetas=thetas))
    # Guard against numerical blow-ups: an infinite or NaN total is
    # reported as -inf so optimizers/samplers treat it as zero probability.
    if np.isinf(lnlike) or np.isnan(lnlike):
        return -np.inf
    return lnlike
def add_X(self, X=None, key=None):
    """Build (or store) an X matrix and cache it in ``Xbuilt``.

    Keyword arguments for X:

    - None :
        Uses ``gpm.binary`` to construct X. If genotypes
        are missing they will not be included in fit. At the end of
        fitting, an epistasis map attribute is attached to the model
        class.

    Parameters
    ----------
    X :
        see above for details.
    key : str
        name for storing the matrix.

    Returns
    -------
    Xbuilt : numpy.ndarray
        newly built 2d array matrix
    """
    # BUG FIX: the original guard was `isinstance(X, str) and X == None`,
    # which is always False (a str is never None), so the documented
    # X=None path was unreachable and fell through to the error branch.
    if X is None:
        if hasattr(self, "gpm") is False:
            raise XMatrixException("To build None, 'missing', or"
                                   "'complete' X matrix, a "
                                   "GenotypePhenotypeMap must be attached")
        # Get X columns
        columns = self.Xcolumns
        # Use desired set of genotypes for rows in X matrix.
        index = self.gpm.binary
        # Build numpy array
        x = get_model_matrix(index, columns, model_type=self.model_type)
        # Set matrix with given key.
        if key is None:
            key = X
        self.Xbuilt[key] = x
    elif type(X) == np.ndarray or type(X) == pd.DataFrame:
        # Set key
        if key is None:
            raise Exception("A key must be given to store.")
        # Store Xmatrix.
        self.Xbuilt[key] = X
    else:
        raise XMatrixException("X must be one of the following: None, "
                               "'complete', numpy.ndarray, or "
                               "pandas.DataFrame.")
    Xbuilt = self.Xbuilt[key]
    return Xbuilt
def add_gpm(self, gpm):
    """Add a GenotypePhenotypeMap object to the epistasis model.

    Parameters
    ----------
    gpm : GenotypePhenotypeMap
        map to attach to this model.

    Returns
    -------
    self
        this model, so calls can be chained.
    """
    self._gpm = gpm
    # Reset Xbuilt: any cached X matrices were built against the old map.
    self.Xbuilt = {}
    # Construct columns for X matrix
    self.Xcolumns = encoding_to_sites(self.order, self.gpm.encoding_table)
    # Map those columns to an epistasis dataframe.
    self.epistasis = EpistasisMap(sites=self.Xcolumns, gpm=gpm)
    return self
@property
def gpm(self):
    """Data stored in a GenotypePhenotypeMap object (set via ``add_gpm``)."""
    return self._gpm
# -----------------------------------------------------------
# Argument handlers.
# -----------------------------------------------------------
def _X(self, data=None, method=None):
    """Resolve the ``X`` argument into a 2-d model matrix.

    ``data`` may be None (build from every genotype in the attached map),
    a single genotype string, a key previously stored in ``Xbuilt``, an
    already built 2-d array, or a sequence of genotypes. The resolved
    matrix is cached in ``Xbuilt`` under ``method`` before returning.
    """
    kind = data.__class__
    X = data
    if X is None:
        # No input: build the matrix from all genotypes in the map.
        X = genotypes_to_X(
            self.gpm.genotypes,
            self.gpm,
            order=self.order,
            model_type=self.model_type
        )
    elif kind is str and X in self.gpm.genotypes:
        # A single genotype string.
        X = genotypes_to_X(
            [X],
            self.gpm,
            order=self.order,
            model_type=self.model_type
        )
    elif kind is str and X in self.Xbuilt:
        # A key naming a previously built matrix.
        X = self.Xbuilt[X]
    elif kind is np.ndarray and X.ndim == 2:
        # Already a 2-d matrix; use it unchanged.
        pass
    elif kind in (list, np.ndarray, pd.DataFrame, pd.Series):
        # A sequence of genotypes.
        X = genotypes_to_X(
            X,
            self.gpm,
            order=self.order,
            model_type=self.model_type
        )
    else:
        raise Exception("X is invalid.")
    # Cache under the calling method's name.
    self.Xbuilt[method] = X
    return X
def _y(self, data=None, method=None):
"""Handle y arguments in this model."""
# Get object type.
obj = data.__class__
y = data
if y is None:
return self.gpm.phenotypes
elif obj in [list, np.ndarray, pd.Series, pd.DataFrame]:
return y
else:
raise Exception("y is invalid.")
def _yerr(self, data=None, method=None):
"""Handle yerr argument in this model."""
# Get object type.
obj = data.__class__
yerr = data
if yerr is None:
return self.gpm.std.upper
elif obj in [list, np.ndarray, pd.Series, pd.DataFrame]:
return yerr
else:
raise Exception("yerr is invalid.")
def _thetas(self, data=None, method=None):
"""Handle yerr argument in this model."""
# Get object type.
obj = data.__class__
thetas = data
if thetas is None:
return self.thetas
elif obj in [list, np.ndarray, pd.Series, pd.DataFrame]:
return thetas
else:
raise Exception("thetas is invalid.")
def _lnprior(self, data=None, method=None):
# Get object type.
obj = data.__class__
_lnprior = data
if _lnprior is None:
return np.zeros(self.gpm.n)
elif obj in [list, np.ndarray, pd.Series, pd.DataFrame]:
return _lnprior
else:
raise Exception("_prior is invalid.")
class BaseModel(AbstractModel, RegressorMixin, BaseEstimator):
    """Base model for defining an epistasis model.

    Combines the abstract epistasis interface (``AbstractModel``) with
    scikit-learn's ``RegressorMixin``/``BaseEstimator`` so that concrete
    subclasses behave like scikit-learn regressors.
    """
    pass
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Beat and tempo
==============
.. autosummary::
:toctree: generated/
beat_track
estimate_tempo
"""
import numpy as np
import scipy
import scipy.signal  # `scipy.signal.*` is used below; `import scipy` alone does not guarantee the submodule is loaded

from . import cache
from . import core
from . import onset
from . import util
from .util.exceptions import ParameterError
__all__ = ['beat_track', 'estimate_tempo']
def beat_track(y=None, sr=22050, onset_envelope=None, hop_length=512,
               start_bpm=120.0, tightness=100, trim=True, bpm=None,
               units='frames'):
    r'''Dynamic programming beat tracker (Ellis, 2007).

    Beats are detected in three stages:

    1. Measure onset strength
    2. Estimate tempo from the onset autocorrelation
    3. Pick peaks in onset strength approximately consistent with the
       estimated tempo

    .. [1] Ellis, Daniel PW. "Beat tracking by dynamic programming."
           Journal of New Music Research 36.1 (2007): 51-60.
           http://labrosa.ee.columbia.edu/projects/beattrack/

    Parameters
    ----------
    y : np.ndarray [shape=(n,)] or None
        audio time series
    sr : number > 0 [scalar]
        sampling rate of `y`
    onset_envelope : np.ndarray [shape=(n,)] or None
        (optional) pre-computed onset strength envelope
    hop_length : int > 0 [scalar]
        number of audio samples between successive `onset_envelope` values
    start_bpm : float > 0 [scalar]
        initial guess for the tempo estimator (in beats per minute)
    tightness : float [scalar]
        tightness of beat distribution around tempo
    trim : bool [scalar]
        trim leading/trailing beats with weak onsets
    bpm : float [scalar]
        (optional) use `bpm` as the tempo instead of estimating it
    units : {'frames', 'samples', 'time'}
        units used to encode detected beat events (default: 'frames')

    Returns
    -------
    tempo : float [scalar, non-negative]
        estimated global tempo (in beats per minute); 0 when no onset
        strength is detected
    beats : np.ndarray [shape=(m,)]
        beat event locations in the requested units; empty when no onset
        strength is detected

    Raises
    ------
    ParameterError
        if neither `y` nor `onset_envelope` are provided,
        or if `units` is not one of 'frames', 'samples', or 'time'

    See Also
    --------
    librosa.onset.onset_strength
    '''
    # Derive an onset strength envelope from the signal when one was not
    # supplied by the caller.
    if onset_envelope is None:
        if y is None:
            raise ParameterError('y or onset_envelope must be provided')
        onset_envelope = onset.onset_strength(y=y,
                                              sr=sr,
                                              hop_length=hop_length,
                                              aggregate=np.median)
    # A flat (all-zero) envelope has nothing to track: 0 BPM, no beats.
    if not onset_envelope.any():
        return (0, np.array([], dtype=int))
    # Fall back to tempo estimation when no BPM was supplied.
    if bpm is None:
        bpm = estimate_tempo(onset_envelope,
                             sr=sr,
                             hop_length=hop_length,
                             start_bpm=start_bpm)
    # Run the dynamic-programming tracker; beats come back in frames.
    beats = __beat_tracker(onset_envelope,
                           bpm,
                           float(sr) / hop_length,
                           tightness,
                           trim)
    # Convert frame indices into the requested output units.
    if units == 'samples':
        beats = core.frames_to_samples(beats, hop_length=hop_length)
    elif units == 'time':
        beats = core.frames_to_time(beats, hop_length=hop_length, sr=sr)
    elif units != 'frames':
        raise ParameterError('Invalid unit type: {}'.format(units))
    return (bpm, beats)
@cache(level=30)
def estimate_tempo(onset_envelope, sr=22050, hop_length=512, start_bpm=120,
                   std_bpm=1.0, ac_size=4.0, duration=90.0, offset=0.0):
    """Estimate the tempo (beats per minute) from an onset envelope.

    Parameters
    ----------
    onset_envelope : np.ndarray [shape=(n,)]
        onset strength envelope
    sr : number > 0 [scalar]
        sampling rate of the time series
    hop_length : int > 0 [scalar]
        hop length of the time series
    start_bpm : float [scalar]
        initial guess of the BPM
    std_bpm : float > 0 [scalar]
        standard deviation of tempo distribution
    ac_size : float > 0 [scalar]
        length (in seconds) of the auto-correlation window
    duration : float > 0 [scalar]
        length of signal (in seconds) to use in estimating tempo
    offset : float > 0 [scalar]
        offset (in seconds) of signal sample to use in estimating tempo

    Returns
    -------
    tempo : float [scalar]
        estimated tempo (beats per minute)

    Raises
    ------
    ParameterError
        if `start_bpm` is not strictly positive

    See Also
    --------
    librosa.onset.onset_strength

    Notes
    -----
    This function caches at level 30.
    """
    if start_bpm <= 0:
        raise ParameterError('start_bpm must be strictly positive')
    fft_res = float(sr) / hop_length
    # Restrict the envelope to [offset, offset + duration] seconds
    # (or as much of that range as actually exists).
    maxcol = int(min(len(onset_envelope) - 1,
                     np.round((offset + duration) * fft_res)))
    mincol = int(max(0, maxcol - np.round(duration * fft_res)))
    # Autocorrelation window: ac_size seconds (empirically chosen).
    ac_window = min(maxcol, np.round(ac_size * fft_res))
    # Autocorrelate, dropping the trivial 0-lag bin.
    ac = core.autocorrelate(onset_envelope[mincol:maxcol], ac_window)[1:]
    # BPM value for each remaining lag bin.
    bpms = core.tempo_frequencies(ac_window, hop_length=hop_length, sr=sr)[1:]
    # Weight by a log-normal tempo prior centered on start_bpm.
    ac *= np.exp(-0.5 * ((np.log2(bpms) - np.log2(start_bpm)) / std_bpm)**2)
    # Local maxima of the weighted correlation...
    peaks = util.localmax(ac)
    # ...ignoring everything before the first negative value.
    peaks[:np.argmax(ac < 0)] = False
    # Consider the best peak along with 1/3x, 1/2x, 2x, and 3x its period.
    candidates = np.argmax(peaks * ac) * np.asarray([1./3, 0.5, 1, 2, 3])
    candidates = candidates[candidates < ac_window].astype(int)
    best_period = np.argmax(ac[candidates])
    if candidates[best_period] > 0:
        return bpms[candidates[best_period]]
    # Degenerate case (period 0): fall back to the prior.
    return start_bpm
def __beat_tracker(onset_envelope, bpm, fft_res, tightness, trim):
    """Internal function that tracks beats in an onset strength envelope.

    Parameters
    ----------
    onset_envelope : np.ndarray [shape=(n,)]
        onset strength envelope
    bpm : float [scalar]
        tempo estimate
    fft_res : float [scalar]
        resolution of the fft (sr / hop_length)
    tightness: float [scalar]
        how closely do we adhere to bpm?
    trim : bool [scalar]
        trim leading/trailing beats with weak onsets?

    Returns
    -------
    beats : np.ndarray [shape=(n,)]
        frame numbers of beat events

    Raises
    ------
    ParameterError
        if `bpm` is not strictly positive
    """
    if bpm <= 0:
        raise ParameterError('bpm must be strictly positive')
    # Expected beat period in frames.
    period = round(60.0 * fft_res / bpm)
    # Smooth/AGC the envelope into a per-frame local score.
    localscore = __beat_local_score(onset_envelope, period)
    # Dynamic program: best predecessor and cumulative score per frame.
    backlink, cumscore = __beat_track_dp(localscore, period, tightness)
    # Walk the backlinks from the final beat to the start.
    path = [__last_beat(cumscore)]
    while backlink[path[-1]] >= 0:
        path.append(backlink[path[-1]])
    # Reverse into ascending frame order.
    beats = np.array(path[::-1], dtype=int)
    # Drop spurious leading/trailing beats.
    return __trim_beats(localscore, beats, trim)
# -- Helper functions for beat tracking
def __normalize_onsets(onsets):
'''Maps onset strength function into the range [0, 1]'''
norm = onsets.std(ddof=1)
if norm > 0:
onsets = onsets / norm
return onsets
def __beat_local_score(onset_envelope, period):
    '''Construct the local score for an onset envelope and given period.

    The normalized envelope is smoothed with a Gaussian window whose
    width scales with the beat period, so each frame's score reflects
    onset strength in a period-sized neighborhood.

    (Fix: docstring typo "envlope"; `scipy.signal` is now explicitly
    imported at module level — `import scipy` alone does not guarantee
    the submodule is available.)
    '''
    window = np.exp(-0.5 * (np.arange(-period, period+1)*32.0/period)**2)
    return scipy.signal.convolve(__normalize_onsets(onset_envelope),
                                 window,
                                 'same')
def __beat_track_dp(localscore, period, tightness):
    """Core dynamic program for beat tracking.

    Parameters
    ----------
    localscore : np.ndarray
        smoothed per-frame onset strength (see ``__beat_local_score``)
    period : number
        estimated beat period, in frames
    tightness : float > 0
        penalty weight for deviating from the ideal one-period spacing

    Returns
    -------
    backlink : np.ndarray [int]
        best predecessor frame for a beat at each frame (-1 marks a
        sequence start)
    cumscore : np.ndarray
        best cumulative score of a beat sequence ending at each frame
    """
    backlink = np.zeros_like(localscore, dtype=int)
    cumscore = np.zeros_like(localscore)
    # Search range for previous beat: between 2 periods and half a
    # period back (expressed as negative offsets from the current frame).
    window = np.arange(-2 * period, -np.round(period / 2) + 1, dtype=int)
    # Make a score window, which begins biased toward start_bpm and skewed
    if tightness <= 0:
        raise ParameterError('tightness must be strictly positive')
    # Log-squared transition penalty: zero at exactly one period back,
    # growing as the gap deviates from the ideal period.
    txwt = -tightness * (np.log(-window / period) ** 2)
    # Are we on the first beat?
    first_beat = True
    for i, score_i in enumerate(localscore):
        # Are we reaching back before time 0?
        z_pad = np.maximum(0, min(- window[0], len(window)))
        # Search over all possible predecessors
        candidates = txwt.copy()
        candidates[z_pad:] = candidates[z_pad:] + cumscore[window[z_pad:]]
        # Find the best preceding beat
        beat_location = np.argmax(candidates)
        # Add the local score
        cumscore[i] = score_i + candidates[beat_location]
        # Special case the first onset.  Stop if the localscore is small
        if first_beat and score_i < 0.01 * localscore.max():
            backlink[i] = -1
        else:
            backlink[i] = window[beat_location]
            first_beat = False
        # Update the time range
        window = window + 1
    return backlink, cumscore
def __last_beat(cumscore):
    """Pick the final beat from the cumulative score array.

    Returns the last local maximum whose score exceeds half the median
    local-maximum score (the cumulative score generally increases, so
    the last qualifying peak is the last beat).
    """
    peaks = util.localmax(cumscore)
    med_score = np.median(cumscore[np.argwhere(peaks)])
    return np.argwhere(cumscore * peaks * 2 > med_score).max()
def __trim_beats(localscore, beats, trim):
"""Final post-processing: throw out spurious leading/trailing beats"""
smooth_boe = scipy.signal.convolve(localscore[beats],
scipy.signal.hann(5),
'same')
if trim:
threshold = 0.5 * ((smooth_boe**2).mean()**0.5)
else:
threshold = 0.0
valid = np.argwhere(smooth_boe > threshold)
return beats[valid.min():valid.max()]
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.learning.training_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework.test_util import TensorFlowTestCase
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.training import training_ops
class TrainingOpsTest(TensorFlowTestCase):
  """Smoke tests for the raw training ops (SGD, Adagrad, Ftrl, Adam).

  Each test applies an op to concrete numpy inputs and compares the
  update against a hand-computed numpy version of the same rule.
  (Uses the TensorFlow 1.x API: sessions, `initialize_all_variables`.)
  """

  def _toType(self, dtype):
    """Map a numpy dtype to the corresponding TensorFlow dtype."""
    if dtype == np.float32:
      return tf.float32
    elif dtype == np.float64:
      return tf.float64
    elif dtype == np.int32:
      return tf.int32
    elif dtype == np.int64:
      return tf.int64
    else:
      # Unsupported dtype: fail loudly in the test helper.
      assert False, (dtype)

  def _testTypes(self, x, alpha, delta, use_gpu=None):
    """Check apply_gradient_descent: var <- var - alpha * delta."""
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var = variables.Variable(x)
      variables.initialize_all_variables().run()
      self.assertAllEqual(x, var.eval())
      apply_sgd = training_ops.apply_gradient_descent(var, alpha, delta)
      out = apply_sgd.eval()
      self.assertShapeEqual(out, apply_sgd)
      self.assertAllEqual(x - alpha * delta, out)

  def testApplyGradientDescent(self):
    """Run the SGD check for both float types, on CPU and GPU."""
    for (dtype, use_gpu) in itertools.product(
        [np.float32, np.float64], [False, True]):
      x = np.arange(100).astype(dtype)
      alpha = np.array(2.0).astype(dtype)
      delta = np.arange(100).astype(dtype)
      self._testTypes(x, alpha, delta, use_gpu)

  def _testTypesForAdagrad(self, x, y, lr, grad, use_gpu=None):
    """Check apply_adagrad against the numpy Adagrad update rule."""
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      variables.initialize_all_variables().run()
      self.assertAllEqual(x, var.eval())
      apply_adagrad = training_ops.apply_adagrad(var, accum, lr, grad)
      out = apply_adagrad.eval()
      self.assertShapeEqual(out, apply_adagrad)
      # var <- var - lr * grad / sqrt(accum + grad^2)
      self.assertAllClose(
          x - lr * grad * (y + grad * grad) ** (-0.5), out)
      # accum <- accum + grad^2
      self.assertAllEqual(y + grad * grad, accum.eval())

  def _testTypesForFtrl(self, x, y, z, lr, grad, use_gpu=None, l1=0.0,
                        l2=0.0, lr_power=-0.5):
    """Check apply_ftrl against a numpy reimplementation of FTRL."""
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      linear = variables.Variable(z)
      variables.initialize_all_variables().run()
      self.assertAllEqual(x, var.eval())
      apply_ftrl = training_ops.apply_ftrl(var, accum, linear, grad, lr, l1, l2,
                                           lr_power)
      out = apply_ftrl.eval()
      self.assertShapeEqual(out, apply_ftrl)
      # Recompute the FTRL update in numpy for comparison.
      accum_update = y + grad * grad
      linear_update = z + grad - (accum_update ** (-lr_power) - y ** (
          -lr_power)) / lr * x
      quadratic = 1.0 / (accum_update ** (lr_power) * lr) + 2 * l2
      # Per-element soft-threshold: zero when |linear| <= l1.
      expected_out = np.array([(np.sign(
          linear_update[i]) * l1 - linear_update[i]) / (
              quadratic[i]) if np.abs(
                  linear_update[i]) > l1 else 0.0 for i in range(
                      linear_update.size)])
      self.assertAllClose(accum_update, accum.eval())
      self.assertAllClose(linear_update, linear.eval())
      self.assertAllClose(expected_out, out)

  def testApplyAdagrad(self):
    """Run the Adagrad check for both float types, on CPU and GPU."""
    for (dtype, use_gpu) in itertools.product(
        [np.float32, np.float64], [False, True]):
      x = np.arange(100).astype(dtype)
      y = np.arange(1, 101).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self._testTypesForAdagrad(x, y, lr, grad, use_gpu)

  def testApplyFtrl(self):
    """Run the FTRL check (CPU only) with nonzero l1/l2 regularization."""
    for dtype in [np.float32, np.float64]:
      x = np.arange(100).astype(dtype)
      y = np.arange(1, 101).astype(dtype)
      z = np.arange(102, 202).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      l1 = np.array(3.0).astype(dtype)
      l2 = np.array(4.0).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self._testTypesForFtrl(x, y, z, lr, grad, use_gpu=False, l1=l1, l2=l2)

  def _testTypesForSparseAdagrad(self, x, y, lr, grad, indices):
    """Check sparse_apply_adagrad: Adagrad update on selected rows only."""
    self.setUp()
    with self.test_session(use_gpu=False):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      variables.initialize_all_variables().run()
      self.assertAllEqual(x, var.eval())
      sparse_apply_adagrad = training_ops.sparse_apply_adagrad(
          var, accum, lr, grad,
          constant_op.constant(indices, self._toType(indices.dtype)))
      out = sparse_apply_adagrad.eval()
      self.assertShapeEqual(out, sparse_apply_adagrad)
      # Only the rows named by `indices` should have been updated.
      for (i, index) in enumerate(indices):
        self.assertAllClose(
            x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (-0.5),
            var.eval()[index])
        self.assertAllClose(y[index] + grad[i] * grad[i], accum.eval()[index])

  def _testTypesForSparseFtrl(self, x, y, z, lr, grad, indices, l1=0.0, l2=0.0,
                              lr_power=-0.5):
    """Check sparse_apply_ftrl on selected rows (no-regularization case)."""
    self.setUp()
    with self.test_session(use_gpu=False):
      var = variables.Variable(x)
      accum = variables.Variable(y)
      linear = variables.Variable(z)
      variables.initialize_all_variables().run()
      self.assertAllEqual(x, var.eval())
      sparse_apply_ftrl = training_ops.sparse_apply_ftrl(
          var, accum, linear, grad,
          constant_op.constant(indices, self._toType(indices.dtype)),
          lr, l1, l2, lr_power=lr_power)
      out = sparse_apply_ftrl.eval()
      self.assertShapeEqual(out, sparse_apply_ftrl)
      for (i, index) in enumerate(indices):
        self.assertAllClose(
            x[index] - lr * grad[i] * (y[index] + grad[i] * grad[i]) ** (
                lr_power),
            var.eval()[index])
        self.assertAllEqual(y[index] + grad[i] * grad[i], accum.eval()[index])

  def testSparseApplyAdagrad(self):
    """Sparse Adagrad over 2-d rows, for all float/index type pairs."""
    for (dtype, index_type) in itertools.product(
        [np.float32, np.float64], [np.int32, np.int64]):
      x_val = [np.arange(10), np.arange(10, 20), np.arange(20, 30)]
      y_val = [np.arange(1, 11), np.arange(11, 21), np.arange(21, 31)]
      x = np.array(x_val).astype(dtype)
      y = np.array(y_val).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad_val = [np.arange(10), np.arange(10)]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseAdagrad(x, y, lr, grad, indices)

  def testSparseApplyAdagradDim1(self):
    """Sparse Adagrad over single-column rows (dim-1 inner shape)."""
    for (dtype, index_type) in itertools.product(
        [np.float32, np.float64], [np.int32, np.int64]):
      x_val = [[1.0], [2.0], [3.0]]
      y_val = [[4.0], [5.0], [6.0]]
      x = np.array(x_val).astype(dtype)
      y = np.array(y_val).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad_val = [[1.5], [2.5]]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseAdagrad(x, y, lr, grad, indices)

  def testSparseApplyFtrlDim1(self):
    """Sparse FTRL over single-column rows (dim-1 inner shape)."""
    for (dtype, index_type) in itertools.product(
        [np.float32, np.float64], [np.int32, np.int64]):
      x_val = [[0.0], [0.0], [0.0]]
      y_val = [[4.0], [5.0], [6.0]]
      z_val = [[0.0], [0.0], [0.0]]
      x = np.array(x_val).astype(dtype)
      y = np.array(y_val).astype(dtype)
      z = np.array(z_val).astype(dtype)
      lr = np.array(2.0).astype(dtype)
      grad_val = [[1.5], [2.5]]
      grad = np.array(grad_val).astype(dtype)
      indices = np.array([0, 2]).astype(index_type)
      self._testTypesForSparseFtrl(x, y, z, lr, grad, indices)

  def testApplyAdam(self):
    """Run the Adam check for both float types, on CPU and GPU."""
    for dtype, use_gpu in itertools.product(
        [np.float32, np.float64], [False, True]):
      var = np.arange(100).astype(dtype)
      m = np.arange(1, 101).astype(dtype)
      v = np.arange(101, 201).astype(dtype)
      grad = np.arange(100).astype(dtype)
      self._testTypesForAdam(var, m, v, grad, use_gpu)

  def _testTypesForAdam(self, var, m, v, grad, use_gpu):
    """Check apply_adam against the numpy update in _adamUpdateNumpy."""
    self.setUp()
    with self.test_session(use_gpu=use_gpu):
      var_t = variables.Variable(var)
      m_t = variables.Variable(m)
      v_t = variables.Variable(v)
      # Standard Adam hyperparameters at step t=1.
      t = 1
      beta1 = np.array(0.9, dtype=var.dtype)
      beta2 = np.array(0.999, dtype=var.dtype)
      beta1_power = beta1**t
      beta2_power = beta2**t
      lr = np.array(0.001, dtype=var.dtype)
      epsilon = np.array(1e-8, dtype=var.dtype)
      beta1_t = constant_op.constant(beta1, self._toType(var.dtype), [])
      beta2_t = constant_op.constant(beta2, self._toType(var.dtype), [])
      beta1_power_t = variables.Variable(beta1_power)
      beta2_power_t = variables.Variable(beta2_power)
      lr_t = constant_op.constant(lr, self._toType(var.dtype), [])
      epsilon_t = constant_op.constant(epsilon, self._toType(var.dtype), [])
      variables.initialize_all_variables().run()
      self.assertAllEqual(var, var_t.eval())
      new_var, _, _ = self._adamUpdateNumpy(var, grad, t, m, v,
                                            lr, beta1, beta2, epsilon)
      apply_adam = training_ops.apply_adam(var_t, m_t, v_t, beta1_power_t,
                                           beta2_power_t, lr_t,
                                           beta1_t, beta2_t, epsilon_t, grad)
      out = apply_adam.eval()
      self.assertShapeEqual(out, apply_adam)
      self.assertAllClose(new_var, out)

  def _adamUpdateNumpy(self, param, g_t, t, m, v, alpha, beta1,
                       beta2, epsilon):
    """Reference Adam step in numpy; returns (new_param, new_m, new_v)."""
    # Bias-corrected learning rate for step t.
    alpha_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    m_t = beta1 * m + (1 - beta1) * g_t
    v_t = beta2 * v + (1 - beta2) * g_t * g_t
    param_t = param - alpha_t * m_t / (np.sqrt(v_t) + epsilon)
    return param_t, m_t, v_t
# Run the test suite when this module is invoked as a script.
if __name__ == '__main__':
  googletest.main()
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations(object):
"""ConnectionMonitorsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_06_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
    # Pipeline client used to build and send HTTP requests.
    self._client = client
    # Serializer for URL/query/header parameters and request bodies.
    self._serialize = serializer
    # Deserializer for response payloads and error bodies.
    self._deserialize = deserializer
    # Service-client configuration (subscription id, polling interval, ...).
    self._config = config
def _create_or_update_initial(
    self,
    resource_group_name,  # type: str
    network_watcher_name,  # type: str
    connection_monitor_name,  # type: str
    parameters,  # type: "_models.ConnectionMonitor"
    **kwargs  # type: Any
):
    # type: (...) -> "_models.ConnectionMonitorResult"
    """Issue the initial PUT of the create-or-update long-running operation.

    Returns the deserialized ConnectionMonitorResult from a 200 (updated)
    or 201 (created) response; raises HttpResponseError otherwise.
    (AutoRest-generated code — regeneration will overwrite hand edits.)
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
    # Map well-known failure codes to azure-core exception types.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Construct URL from the operation's URL template (see metadata below).
    url = self._create_or_update_initial.metadata['url']  # type: ignore
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct query parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    # Serialize the ConnectionMonitor payload as the request body.
    body_content_kwargs = {}  # type: Dict[str, Any]
    body_content = self._serialize.body(parameters, 'ConnectionMonitor')
    body_content_kwargs['content'] = body_content
    request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 201]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # 200 (updated) and 201 (created) both carry a ConnectionMonitorResult.
    if response.status_code == 200:
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

    if response.status_code == 201:
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'}  # type: ignore
def begin_create_or_update(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    parameters, # type: "_models.ConnectionMonitor"
    **kwargs # type: Any
):
    # type: (...) -> LROPoller["_models.ConnectionMonitorResult"]
    """Create or update a connection monitor.
    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :param parameters: Parameters that define the operation to create a connection monitor.
    :type parameters: ~azure.mgmt.network.v2019_06_01.models.ConnectionMonitor
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ConnectionMonitorResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh operation: issue the initial PUT. The pass-through ``cls`` makes the
        # initial call hand back the raw PipelineResponse, which the poller requires.
        raw_result = self._create_or_update_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            parameters=parameters,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # These kwargs were consumed by the initial request and must not reach the poller.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the result model; honor a
        # caller-supplied ``cls`` callback if one was given.
        deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Resolve the polling strategy: True -> default ARM polling (final state read via
    # the Azure-AsyncOperation header), False -> no polling, else a caller-provided method.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def get(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> "_models.ConnectionMonitorResult"
    """Gets a connection monitor by name.
    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ConnectionMonitorResult, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
    # Well-known HTTP failures mapped onto azure-core exception types; callers may
    # extend or override the mapping via the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    # Fill the operation's URL template with the serialized path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self.get.metadata['url'], **path_args)  # type: ignore

    # Query string and headers for the GET request.
    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.get(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _delete_initial(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> None
    """Issue the raw DELETE request for a connection monitor (initial LRO call).

    Returns ``None`` on success (202/204) unless a ``cls`` callback is supplied.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    # Build the fully-resolved request URL from the metadata template.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self._delete_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.delete(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [202, 204]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # DELETE carries no response body; only the optional callback sees the response.
    if cls:
        return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def begin_delete(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> LROPoller[None]
    """Deletes the specified connection monitor.
    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial DELETE. The pass-through ``cls`` makes
        # the initial call return the raw PipelineResponse needed by the poller.
        raw_result = self._delete_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling kwargs.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Delete produces no body; only invoke the caller-supplied callback, if any.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Resolve the polling strategy: True -> default ARM polling (final state read from
    # the Location header), False -> no polling, otherwise a caller-provided method.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def update_tags(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    parameters, # type: "_models.TagsObject"
    **kwargs # type: Any
):
    # type: (...) -> "_models.ConnectionMonitorResult"
    """Update tags of the specified connection monitor.
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param network_watcher_name: The name of the network watcher.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :param parameters: Parameters supplied to update connection monitor tags.
    :type parameters: ~azure.mgmt.network.v2019_06_01.models.TagsObject
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: ConnectionMonitorResult, or the result of cls(response)
    :rtype: ~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorResult
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorResult"]
    # Well-known HTTP failures mapped onto azure-core exception types; extendable
    # by callers through the 'error_map' keyword.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    content_type = kwargs.pop("content_type", "application/json")
    accept = "application/json"

    # Resolve the URL template with serialized path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self.update_tags.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    # Serialize the tags payload and issue the PATCH.
    body_content = self._serialize.body(parameters, 'TagsObject')
    request = self._client.patch(target_url, query, headers, content=body_content)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
def _stop_initial(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> None
    """Issue the raw POST to the /stop endpoint (initial LRO call).

    Returns ``None`` on success (200/202) unless a ``cls`` callback is supplied.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    # Resolve the /stop URL template with serialized path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self._stop_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # No response body to deserialize; only the optional callback sees the response.
    if cls:
        return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def begin_stop(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> LROPoller[None]
    """Stops the specified connection monitor.
    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial POST to /stop. The pass-through ``cls``
        # makes the initial call return the raw PipelineResponse for the poller.
        raw_result = self._stop_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling kwargs.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Stop produces no body; only invoke the caller-supplied callback, if any.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Resolve the polling strategy: True -> default ARM polling (final state read from
    # the Location header), False -> no polling, otherwise a caller-provided method.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
def _start_initial(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> None
    """Issue the raw POST to the /start endpoint (initial LRO call).

    Returns ``None`` on success (200/202) unless a ``cls`` callback is supplied.
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    # Resolve the /start URL template with serialized path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self._start_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # No response body to deserialize; only the optional callback sees the response.
    if cls:
        return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def begin_start(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> LROPoller[None]
    """Starts the specified connection monitor.
    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name of the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType[None]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial POST to /start. The pass-through ``cls``
        # makes the initial call return the raw PipelineResponse for the poller.
        raw_result = self._start_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling kwargs.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Start produces no body; only invoke the caller-supplied callback, if any.
        if cls:
            return cls(pipeline_response, None, {})
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Resolve the polling strategy: True -> default ARM polling (final state read from
    # the Location header), False -> no polling, otherwise a caller-provided method.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
def _query_initial(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> "_models.ConnectionMonitorQueryResult"
    """Issue the raw POST to the /query endpoint (initial LRO call) and
    deserialize the snapshot payload returned with either 200 or 202."""
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.ConnectionMonitorQueryResult"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"

    # Resolve the /query URL template with serialized path parameters.
    path_args = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    target_url = self._client.format_url(self._query_initial.metadata['url'], **path_args)  # type: ignore

    query = {'api-version': self._serialize.query("api_version", api_version, 'str')}  # type: Dict[str, Any]
    headers = {'Accept': self._serialize.header("accept", accept, 'str')}  # type: Dict[str, Any]

    request = self._client.post(target_url, query, headers)
    pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200, 202]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    # Both 200 and 202 carry a ConnectionMonitorQueryResult payload.
    deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
    return cls(pipeline_response, deserialized, {}) if cls else deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def begin_query(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    connection_monitor_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> LROPoller["_models.ConnectionMonitorQueryResult"]
    """Query a snapshot of the most recent connection states.
    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :param connection_monitor_name: The name given to the connection monitor.
    :type connection_monitor_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be ARMPolling.
     Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
    :paramtype polling: bool or ~azure.core.polling.PollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    :return: An instance of LROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
    :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorQueryResult]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
    lro_delay = kwargs.pop(
        'polling_interval',
        self._config.polling_interval
    )
    cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
    if cont_token is None:
        # Fresh operation: fire the initial POST to /query. The pass-through ``cls``
        # makes the initial call return the raw PipelineResponse for the poller.
        raw_result = self._query_initial(
            resource_group_name=resource_group_name,
            network_watcher_name=network_watcher_name,
            connection_monitor_name=connection_monitor_name,
            cls=lambda x,y,z: x,
            **kwargs
        )
    # Consumed by the initial request; must not leak into the polling kwargs.
    kwargs.pop('error_map', None)
    kwargs.pop('content_type', None)
    def get_long_running_output(pipeline_response):
        # Deserialize the terminal LRO response into the query-result model; honor a
        # caller-supplied ``cls`` callback if one was given.
        deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    path_format_arguments = {
        'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
        'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
        'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
        'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
    }
    # Resolve the polling strategy: True -> default ARM polling (final state read from
    # the Location header), False -> no polling, otherwise a caller-provided method.
    if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
    elif polling is False: polling_method = NoPolling()
    else: polling_method = polling
    if cont_token:
        # Resume a previously started operation from its saved continuation token.
        return LROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output
        )
    else:
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
    self,
    resource_group_name, # type: str
    network_watcher_name, # type: str
    **kwargs # type: Any
):
    # type: (...) -> Iterable["_models.ConnectionMonitorListResult"]
    """Lists all connection monitors for the specified Network Watcher.
    :param resource_group_name: The name of the resource group containing Network Watcher.
    :type resource_group_name: str
    :param network_watcher_name: The name of the Network Watcher resource.
    :type network_watcher_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
    :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_06_01.models.ConnectionMonitorListResult]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2019-06-01"
    accept = "application/json"
    def prepare_request(next_link=None):
        # Build either the first-page request (from the URL template) or a
        # follow-up request against the server-provided next_link.
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        if not next_link:
            # Construct URL
            url = self.list.metadata['url'] # type: ignore
            path_format_arguments = {
                'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            }
            url = self._client.format_url(url, **path_format_arguments)
            # Construct parameters
            query_parameters = {} # type: Dict[str, Any]
            query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
            request = self._client.get(url, query_parameters, header_parameters)
        else:
            # next_link is an absolute URL that already carries its query string.
            url = next_link
            query_parameters = {} # type: Dict[str, Any]
            request = self._client.get(url, query_parameters, header_parameters)
        return request
    def extract_data(pipeline_response):
        # Deserialize one page and hand ItemPaged (continuation_token, items).
        # This operation's result model exposes no next link, so None is returned.
        deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return None, iter(list_of_elem)
    def get_next(next_link=None):
        # Fetch one page, translating HTTP failures into azure-core exceptions.
        request = prepare_request(next_link)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        return pipeline_response
    return ItemPaged(
        get_next, extract_data
    )
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
|
|
"""Simplify, correct, and consolidate network topology."""
import logging as lg
import geopandas as gpd
import networkx as nx
from shapely.geometry import LineString
from shapely.geometry import MultiPolygon
from shapely.geometry import Point
from shapely.geometry import Polygon
from . import stats
from . import utils
from . import utils_graph
def _is_endpoint(G, node, strict=True):
"""
Is node a true endpoint of an edge.
Return True if the node is a "real" endpoint of an edge in the network,
otherwise False. OSM data includes lots of nodes that exist only as points
to help streets bend around curves. An end point is a node that either:
1) is its own neighbor, ie, it self-loops.
2) or, has no incoming edges or no outgoing edges, ie, all its incident
edges point inward or all its incident edges point outward.
3) or, it does not have exactly two neighbors and degree of 2 or 4.
4) or, if strict mode is false, if its edges have different OSM IDs.
Parameters
----------
G : networkx.MultiDiGraph
input graph
node : int
the node to examine
strict : bool
if False, allow nodes to be end points even if they fail all other rules
but have edges with different OSM IDs
Returns
-------
bool
"""
neighbors = set(list(G.predecessors(node)) + list(G.successors(node)))
n = len(neighbors)
d = G.degree(node)
# rule 1
if node in neighbors:
# if the node appears in its list of neighbors, it self-loops
# this is always an endpoint.
return True
# rule 2
elif G.out_degree(node) == 0 or G.in_degree(node) == 0:
# if node has no incoming edges or no outgoing edges, it is an endpoint
return True
# rule 3
elif not (n == 2 and (d == 2 or d == 4)):
# else, if it does NOT have 2 neighbors AND either 2 or 4 directed
# edges, it is an endpoint. either it has 1 or 3+ neighbors, in which
# case it is a dead-end or an intersection of multiple streets or it has
# 2 neighbors but 3 degree (indicating a change from oneway to twoway)
# or more than 4 degree (indicating a parallel edge) and thus is an
# endpoint
return True
# rule 4
elif not strict:
# non-strict mode: do its incident edges have different OSM IDs?
osmids = []
# add all the edge OSM IDs for incoming edges
for u in G.predecessors(node):
for key in G[u][node]:
osmids.append(G.edges[u, node, key]["osmid"])
# add all the edge OSM IDs for outgoing edges
for v in G.successors(node):
for key in G[node][v]:
osmids.append(G.edges[node, v, key]["osmid"])
# if there is more than 1 OSM ID in the list of edge OSM IDs then it is
# an endpoint, if not, it isn't
return len(set(osmids)) > 1
# if none of the preceding rules returned true, then it is not an endpoint
else:
return False
def _build_path(G, endpoint, endpoint_successor, endpoints):
    """
    Build a path of nodes from one endpoint node to next endpoint node.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    endpoint : int
        the endpoint node from which to start the path
    endpoint_successor : int
        the successor of endpoint through which the path to the next endpoint
        will be built
    endpoints : set
        the set of all nodes in the graph that are endpoints

    Returns
    -------
    path : list
        the first and last items in the resulting path list are endpoint
        nodes, and all other items are interstitial nodes that can be removed
        subsequently
    """
    # start building path from endpoint node through its successor
    path = [endpoint, endpoint_successor]
    # for each successor of the endpoint's successor
    # (note: every branch inside the if-block below ends in a return, so only
    # the first successor not already in the path is ever walked)
    for successor in G.successors(endpoint_successor):
        if successor not in path:
            # if this successor is already in the path, ignore it, otherwise add
            # it to the path
            path.append(successor)
            # walk forward through interstitial nodes until an endpoint is hit
            while successor not in endpoints:
                # find successors (of current successor) not in path
                successors = [n for n in G.successors(successor) if n not in path]
                # 99%+ of the time there will be only 1 successor: add to path
                if len(successors) == 1:
                    successor = successors[0]
                    path.append(successor)
                # handle relatively rare cases or OSM digitization quirks
                elif len(successors) == 0:
                    if endpoint in G.successors(successor):
                        # we have come to the end of a self-looping edge, so
                        # add first node to end of path to close it and return
                        return path + [endpoint]
                    else:  # pragma: no cover
                        # this can happen due to OSM digitization error where
                        # a one-way street turns into a two-way here, but
                        # duplicate incoming one-way edges are present
                        utils.log(
                            f"Unexpected simplify pattern handled near {successor}", level=lg.WARN
                        )
                        return path
                else:  # pragma: no cover
                    # if successor has >1 successors, then successor must have
                    # been an endpoint because you can go in 2 new directions.
                    # this should never occur in practice
                    raise Exception(f"Unexpected simplify pattern failed near {successor}")
            # if this successor is an endpoint, we've completed the path
            return path
    # if endpoint_successor has no successors not already in the path, return
    # the current path: this is usually due to a digitization quirk on OSM
    return path
def _get_paths_to_simplify(G, strict=True):
    """
    Generate all the paths to be simplified between endpoint nodes.

    The path is ordered from the first endpoint, through the interstitial nodes,
    to the second endpoint.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    strict : bool
        if False, allow nodes to be end points even if they fail all other rules
        but have edges with different OSM IDs

    Yields
    ------
    path_to_simplify : list
        ordered list of node IDs: endpoint, interstitial nodes, endpoint
    """
    # first identify all the nodes that are endpoints
    # (set comprehension instead of set([...]) -- same result, no throwaway list)
    endpoints = {n for n in G.nodes if _is_endpoint(G, n, strict=strict)}
    utils.log(f"Identified {len(endpoints)} edge endpoints")

    # for each endpoint node, look at each of its successor nodes
    for endpoint in endpoints:
        for successor in G.successors(endpoint):
            if successor not in endpoints:
                # if endpoint node's successor is not an endpoint, build path
                # from the endpoint node, through the successor, and on to the
                # next endpoint node
                yield _build_path(G, endpoint, successor, endpoints)
def simplify_graph(G, strict=True, remove_rings=True):
    """
    Simplify a graph's topology by removing interstitial nodes.

    Simplifies graph topology by removing all nodes that are not intersections
    or dead-ends. Create an edge directly between the end points that
    encapsulate them, but retain the geometry of the original edges, saved as
    a new `geometry` attribute on the new edge. Note that only simplified
    edges receive a `geometry` attribute. Some of the resulting consolidated
    edges may comprise multiple OSM ways, and if so, their multiple attribute
    values are stored as a list.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        input graph
    strict : bool
        if False, allow nodes to be end points even if they fail all other
        rules but have incident edges with different OSM IDs. Lets you keep
        nodes at elbow two-way intersections, but sometimes individual blocks
        have multiple OSM IDs within them too.
    remove_rings : bool
        if True, remove isolated self-contained rings that have no endpoints

    Returns
    -------
    G : networkx.MultiDiGraph
        topologically simplified graph, with a new `geometry` attribute on
        each simplified edge

    Raises
    ------
    Exception
        if the graph has already been simplified (graph attr "simplified")
    """
    if "simplified" in G.graph and G.graph["simplified"]:  # pragma: no cover
        raise Exception("This graph has already been simplified, cannot simplify it again.")
    utils.log("Begin topologically simplifying the graph...")
    # define edge segment attributes to sum upon edge simplification
    attrs_to_sum = {"length", "travel_time"}
    # make a copy to not mutate original graph object caller passed in
    G = G.copy()
    initial_node_count = len(G)
    initial_edge_count = len(G.edges)
    all_nodes_to_remove = []
    all_edges_to_add = []
    # generate each path that needs to be simplified
    for path in _get_paths_to_simplify(G, strict=strict):
        # add the interstitial edges we're removing to a list so we can retain
        # their spatial geometry
        path_attributes = dict()
        for u, v in zip(path[:-1], path[1:]):
            # there should rarely be multiple edges between interstitial nodes
            # usually happens if OSM has duplicate ways digitized for just one
            # street... we will keep only one of the edges (see below)
            edge_count = G.number_of_edges(u, v)
            if edge_count != 1:
                utils.log(f"Found {edge_count} edges between {u} and {v} when simplifying")
            # get edge between these nodes: if multiple edges exist between
            # them (see above), we retain only one in the simplified graph
            # (always key 0 -- the others are dropped silently)
            edge_data = G.edges[u, v, 0]
            for attr in edge_data:
                if attr in path_attributes:
                    # if this key already exists in the dict, append it to the
                    # value list
                    path_attributes[attr].append(edge_data[attr])
                else:
                    # if this key doesn't already exist, set the value to a list
                    # containing the one value
                    path_attributes[attr] = [edge_data[attr]]
        # consolidate the path's edge segments' attribute values
        for attr in path_attributes:
            if attr in attrs_to_sum:
                # if this attribute must be summed, sum it now
                path_attributes[attr] = sum(path_attributes[attr])
            elif len(set(path_attributes[attr])) == 1:
                # if there's only 1 unique value in this attribute list,
                # consolidate it to the single value (the zero-th):
                path_attributes[attr] = path_attributes[attr][0]
            else:
                # otherwise, if there are multiple values, keep one of each
                path_attributes[attr] = list(set(path_attributes[attr]))
        # construct the new consolidated edge's geometry for this path
        path_attributes["geometry"] = LineString(
            [Point((G.nodes[node]["x"], G.nodes[node]["y"])) for node in path]
        )
        # add the nodes and edge to their lists for processing at the end
        all_nodes_to_remove.extend(path[1:-1])
        all_edges_to_add.append(
            {"origin": path[0], "destination": path[-1], "attr_dict": path_attributes}
        )
    # for each edge to add in the list we assembled, create a new edge between
    # the origin and destination
    for edge in all_edges_to_add:
        G.add_edge(edge["origin"], edge["destination"], **edge["attr_dict"])
    # finally remove all the interstitial nodes between the new edges
    G.remove_nodes_from(set(all_nodes_to_remove))
    if remove_rings:
        # remove any connected components that form a self-contained ring
        # without any endpoints
        # NOTE(review): strict is not forwarded to _is_endpoint here, so ring
        # detection always uses the strict rules -- confirm this is intended
        wccs = nx.weakly_connected_components(G)
        nodes_in_rings = set()
        for wcc in wccs:
            if not any(_is_endpoint(G, n) for n in wcc):
                nodes_in_rings.update(wcc)
        G.remove_nodes_from(nodes_in_rings)
    # mark graph as having been simplified
    G.graph["simplified"] = True
    msg = (
        f"Simplified graph: {initial_node_count} to {len(G)} nodes, "
        f"{initial_edge_count} to {len(G.edges)} edges"
    )
    utils.log(msg)
    return G
def consolidate_intersections(
    G, tolerance=10, rebuild_graph=True, dead_ends=False, reconnect_edges=True
):
    """
    Consolidate intersections comprising clusters of nearby nodes.

    Nearby nodes are merged and the result is either the merged clusters'
    centroids (rebuild_graph=False: a fast, purely geometric algorithm) or a
    rebuilt graph with consolidated intersections and reconnected edge
    geometries (rebuild_graph=True: a slower but more accurate topological
    algorithm). Tune tolerance to approximately match the street design
    standards of the network at hand, and always use a projected graph so
    the buffering units (e.g., meters) are meaningful and consistent.

    In a rebuilt graph, node IDs represent clusters rather than osmids; each
    node's osmid_original attribute holds its original osmid(s) -- a list
    when several nodes were merged together.

    This is useful because divided roads are digitized as separate
    centerline edges, so one real-world intersection of two divided roads
    appears as 4 graph nodes (similarly for roundabouts and traffic
    circles). Buffering nodes, dissolving the overlapping buffers, and
    taking centroids collapses each such cluster to a single point.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        a projected graph
    tolerance : float
        nodes are buffered to this distance (in graph's geometry's units)
        and subsequent overlaps are dissolved into a single node
    rebuild_graph : bool
        if True, consolidate the nodes topologically and return a rebuilt
        networkx.MultiDiGraph; if False, consolidate geometrically and
        return the consolidated node points as a geopandas.GeoSeries
    dead_ends : bool
        if False, discard dead-end nodes to return only street-intersection
        points
    reconnect_edges : bool
        ignored unless rebuild_graph is True. if True, reconnect edges and
        their geometries in the rebuilt graph to the consolidated nodes and
        update edge length attributes; if False, the returned graph has no
        edges (faster if you just need consolidated intersection counts)

    Returns
    -------
    networkx.MultiDiGraph or geopandas.GeoSeries
        the rebuilt graph with consolidated intersections and reconnected
        edge geometries (rebuild_graph=True), or a GeoSeries of shapely
        Points at the centroids of the street intersections
        (rebuild_graph=False)
    """
    if not dead_ends:
        # discard nodes with at most one incident street so that only true
        # intersections remain
        street_counts = stats.streets_per_node(G)
        nodes_to_discard = [n for n, c in street_counts.items() if c <= 1]
        # operate on a copy so the caller's graph object is left untouched
        G = G.copy()
        G.remove_nodes_from(nodes_to_discard)

    if rebuild_graph:
        # a graph with no nodes or no edges cannot be rebuilt: return as-is
        if not G or not G.edges:
            return G
        return _consolidate_intersections_rebuild_graph(G, tolerance, reconnect_edges)

    # geometric-only path: an empty graph yields an empty GeoSeries
    if not G:
        return gpd.GeoSeries(crs=G.graph["crs"])
    return _merge_nodes_geometric(G, tolerance).centroid
def _merge_nodes_geometric(G, tolerance):
    """
    Geometrically merge nodes lying within some distance of one another.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        a projected graph
    tolerance : float
        buffer distance (in the graph's geometry's units); buffered node
        polygons that overlap are dissolved into single polygons via a
        unary union operation

    Returns
    -------
    merged : geopandas.GeoSeries
        the merged overlapping polygons of the buffered nodes
    """
    # buffer each node point, then dissolve all overlaps with unary union
    node_geoms = utils_graph.graph_to_gdfs(G, edges=False)["geometry"]
    dissolved = node_geoms.buffer(tolerance).unary_union

    # when only one polygon results, wrap it so it is iterable below
    if isinstance(dissolved, Polygon):
        dissolved = MultiPolygon([dissolved])

    return gpd.GeoSeries(dissolved.geoms, crs=G.graph["crs"])
def _consolidate_intersections_rebuild_graph(G, tolerance=10, reconnect_edges=True):
    """
    Consolidate intersections comprising clusters of nearby nodes.

    Merge nodes and return a rebuilt graph with consolidated intersections and
    reconnected edge geometries.

    The tolerance argument should be adjusted to approximately match street
    design standards in the specific street network, and you should always use
    a projected graph to work in meaningful and consistent units like meters.

    Returned graph's node IDs represent clusters rather than osmids. Refer to
    nodes' osmid_original attributes for original osmids. If multiple nodes
    were merged together, the osmid_original attribute is a list of merged
    nodes' osmids.

    Parameters
    ----------
    G : networkx.MultiDiGraph
        a projected graph
    tolerance : float
        nodes are buffered to this distance (in graph's geometry's units) and
        subsequent overlaps are dissolved into a single node
    reconnect_edges : bool
        ignored if rebuild_graph is not True. if True, reconnect edges and
        their geometries in rebuilt graph to the consolidated nodes and update
        edge length attributes; if False, returned graph has no edges (which
        is faster if you just need topologically consolidated intersection
        counts).

    Returns
    -------
    H : networkx.MultiDiGraph
        a rebuilt graph with consolidated intersections and reconnected
        edge geometries
    """
    # STEP 1
    # buffer nodes to passed-in distance and merge overlaps. turn merged nodes
    # into gdf and get centroids of each cluster as x, y
    node_clusters = gpd.GeoDataFrame(geometry=_merge_nodes_geometric(G, tolerance))
    centroids = node_clusters.centroid
    node_clusters["x"] = centroids.x
    node_clusters["y"] = centroids.y
    # STEP 2
    # attach each node to its cluster of merged nodes. first get the original
    # graph's node points then spatial join to give each node the label of
    # cluster it's within
    # NOTE(review): sjoin's `predicate` keyword assumes a recent geopandas
    # (older releases call it `op`) -- confirm the pinned dependency version
    node_points = utils_graph.graph_to_gdfs(G, edges=False)[["geometry"]]
    gdf = gpd.sjoin(node_points, node_clusters, how="left", predicate="within")
    gdf = gdf.drop(columns="geometry").rename(columns={"index_right": "cluster"})
    # STEP 3
    # if a cluster contains multiple components (i.e., it's not connected)
    # move each component to its own cluster (otherwise you will connect
    # nodes together that are not truly connected, e.g., nearby deadends or
    # surface streets with bridge).
    groups = gdf.groupby("cluster")
    for cluster_label, nodes_subset in groups:
        if len(nodes_subset) > 1:
            # identify all the (weakly connected) component in cluster
            wccs = list(nx.weakly_connected_components(G.subgraph(nodes_subset.index)))
            if len(wccs) > 1:
                # if there are multiple components in this cluster
                suffix = 0
                for wcc in wccs:
                    # set subcluster xy to the centroid of just these nodes
                    subcluster_centroid = node_points.loc[wcc].unary_union.centroid
                    gdf.loc[wcc, "x"] = subcluster_centroid.x
                    gdf.loc[wcc, "y"] = subcluster_centroid.y
                    # move to subcluster by appending suffix to cluster label
                    gdf.loc[wcc, "cluster"] = f"{cluster_label}-{suffix}"
                    suffix += 1
    # give nodes unique integer IDs (subclusters with suffixes are strings)
    gdf["cluster"] = gdf["cluster"].factorize()[0]
    # STEP 4
    # create new empty graph and copy over misc graph data
    H = nx.MultiDiGraph()
    H.graph = G.graph
    # STEP 5
    # create a new node for each cluster of merged nodes
    # regroup now that we potentially have new cluster labels from step 3
    groups = gdf.groupby("cluster")
    for cluster_label, nodes_subset in groups:
        osmids = nodes_subset.index.to_list()
        if len(osmids) == 1:
            # if cluster is a single node, add that node to new graph
            osmid = osmids[0]
            H.add_node(cluster_label, osmid_original=osmid, **G.nodes[osmid])
        else:
            # if cluster is multiple merged nodes, create one new node to
            # represent them
            H.add_node(
                cluster_label,
                osmid_original=str(osmids),
                x=nodes_subset["x"].iloc[0],
                y=nodes_subset["y"].iloc[0],
            )
    # calculate street_count attribute for all nodes lacking it
    null_nodes = [n for n, sc in H.nodes(data="street_count") if sc is None]
    street_count = stats.count_streets_per_node(H, nodes=null_nodes)
    nx.set_node_attributes(H, street_count, name="street_count")
    if not G.edges or not reconnect_edges:
        # if reconnect_edges is False or there are no edges in original graph
        # (after dead-end removed), then skip edges and return new graph as-is
        return H
    # STEP 6
    # create new edge from cluster to cluster for each edge in original graph
    gdf_edges = utils_graph.graph_to_gdfs(G, nodes=False)
    for u, v, k, data in G.edges(keys=True, data=True):
        u2 = gdf.loc[u, "cluster"]
        v2 = gdf.loc[v, "cluster"]
        # only create the edge if we're not connecting the cluster
        # to itself, but always add original self-loops
        if (u2 != v2) or (u == v):
            data["u_original"] = u
            data["v_original"] = v
            if "geometry" not in data:
                data["geometry"] = gdf_edges.loc[(u, v, k), "geometry"]
            H.add_edge(u2, v2, **data)
    # STEP 7
    # for every group of merged nodes with more than 1 node in it, extend the
    # edge geometries to reach the new node point
    # (re-iterates the step-5 groupby: pandas GroupBy objects are re-iterable)
    for cluster_label, nodes_subset in groups:
        # but only if there were multiple nodes merged together,
        # otherwise it's the same old edge as in original graph
        if len(nodes_subset) > 1:
            # get coords of merged nodes point centroid to prepend or
            # append to the old edge geom's coords
            x = H.nodes[cluster_label]["x"]
            y = H.nodes[cluster_label]["y"]
            xy = [(x, y)]
            # for each edge incident on this new merged node, update its
            # geometry to extend to/from the new node's point coords
            in_edges = set(H.in_edges(cluster_label, keys=True))
            out_edges = set(H.out_edges(cluster_label, keys=True))
            for u, v, k in in_edges | out_edges:
                old_coords = list(H.edges[u, v, k]["geometry"].coords)
                new_coords = xy + old_coords if cluster_label == u else old_coords + xy
                new_geom = LineString(new_coords)
                H.edges[u, v, k]["geometry"] = new_geom
                # update the edge length attribute, given the new geometry
                H.edges[u, v, k]["length"] = new_geom.length
    return H
|
|
# coding: utf-8
#
# Copyright 2018 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Topic-related one-off jobs."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import logging
from constants import constants
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_jobs_one_off
from core.domain import topic_services
from core.platform import models
from core.tests import test_utils
import feconf
(topic_models,) = models.Registry.import_models([models.NAMES.topic])
class TopicMigrationOneOffJobTests(test_utils.GenericTestBase):
    """Tests for TopicMigrationOneOffJob, the one-off job that migrates
    topics' subtopic data to the latest schema version.
    """
    ALBERT_EMAIL = 'albert@example.com'
    ALBERT_NAME = 'albert'
    TOPIC_ID = 'topic_id'
    # Expected subtopic dict once a v1 subtopic has been migrated to the
    # latest schema version.
    MIGRATED_SUBTOPIC_DICT = {
        'id': 1,
        'skill_ids': ['skill_1'],
        'thumbnail_bg_color': None,
        'thumbnail_filename': None,
        'title': 'A subtitle',
        'url_fragment': 'subtitle'
    }
    def setUp(self):
        """Creates and signs up the user who will own the test topics."""
        super(TopicMigrationOneOffJobTests, self).setUp()
        # Setup user who will own the test topics.
        self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
        self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
        self.process_and_flush_pending_tasks()
    def test_migration_job_does_not_convert_up_to_date_topic(self):
        """Tests that the topic migration job does not convert a
        topic that is already the latest schema version.
        """
        # Create a new topic that should not be affected by the
        # job.
        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'A name', 'abbrev', 'description')
        topic.add_subtopic(1, 'A subtitle')
        topic_services.save_new_topic(self.albert_id, topic)
        self.assertEqual(
            topic.subtopic_schema_version,
            feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION)
        # Start migration job.
        job_id = (
            topic_jobs_one_off.TopicMigrationOneOffJob.create_new())
        topic_jobs_one_off.TopicMigrationOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_tasks()
        # Verify the topic is exactly the same after migration.
        updated_topic = (
            topic_fetchers.get_topic_by_id(self.TOPIC_ID))
        self.assertEqual(
            updated_topic.subtopic_schema_version,
            feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION)
        self.assertEqual(
            topic.subtopics[0].to_dict(), updated_topic.subtopics[0].to_dict())
        # The job still reports the topic as "migrated" in its output.
        output = topic_jobs_one_off.TopicMigrationOneOffJob.get_output(job_id)
        expected = [[u'topic_migrated',
                     [u'1 topics successfully migrated.']]]
        self.assertEqual(expected, [ast.literal_eval(x) for x in output])
    def test_migration_job_skips_deleted_topic(self):
        """Tests that the topic migration job skips deleted topic
        and does not attempt to migrate.
        """
        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'A name', 'abbrev', 'description')
        topic_services.save_new_topic(self.albert_id, topic)
        # Delete the topic before migration occurs.
        topic_services.delete_topic(self.albert_id, self.TOPIC_ID)
        # Ensure the topic is deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        # Start migration job on sample topic.
        job_id = (
            topic_jobs_one_off.TopicMigrationOneOffJob.create_new())
        topic_jobs_one_off.TopicMigrationOneOffJob.enqueue(job_id)
        # This running without errors indicates the deleted topic is
        # being ignored.
        self.process_and_flush_pending_tasks()
        # Ensure the topic is still deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        output = topic_jobs_one_off.TopicMigrationOneOffJob.get_output(job_id)
        expected = [[u'topic_deleted',
                     [u'Encountered 1 deleted topics.']]]
        self.assertEqual(expected, [ast.literal_eval(x) for x in output])
    def test_migration_job_converts_old_topic(self):
        """Tests that the schema conversion functions work
        correctly and an old topic is converted to new
        version.
        """
        # Generate topic with old(v1) subtopic data.
        self.save_new_topic_with_subtopic_schema_v1(
            self.TOPIC_ID, self.albert_id, 'A name', 'abbrev', 'topic-one',
            'a name', '', 'Image.svg', '#C6DCDA', [], [], [], 2)
        topic_model = (
            topic_models.TopicModel.get(self.TOPIC_ID))
        self.assertEqual(topic_model.subtopic_schema_version, 1)
        self.assertEqual(
            topic_model.subtopics[0],
            {
                'id': 1,
                'skill_ids': ['skill_1'],
                'title': 'A subtitle'
            })
        # The fetcher converts on read: the returned domain object is already
        # at the latest schema even though the stored model is still v1.
        topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        self.assertEqual(topic.subtopic_schema_version, 3)
        self.assertEqual(
            topic.subtopics[0].to_dict(),
            self.MIGRATED_SUBTOPIC_DICT)
        # Start migration job.
        job_id = (
            topic_jobs_one_off.TopicMigrationOneOffJob.create_new())
        topic_jobs_one_off.TopicMigrationOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_tasks()
        # Verify the topic migrates correctly.
        updated_topic = (
            topic_models.TopicModel.get(self.TOPIC_ID))
        self.assertEqual(
            updated_topic.subtopic_schema_version,
            feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION)
        updated_topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        self.assertEqual(
            updated_topic.subtopic_schema_version,
            feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION)
        self.assertEqual(
            updated_topic.subtopics[0].to_dict(),
            self.MIGRATED_SUBTOPIC_DICT)
        output = topic_jobs_one_off.TopicMigrationOneOffJob.get_output(job_id)
        expected = [[u'topic_migrated',
                     [u'1 topics successfully migrated.']]]
        self.assertEqual(expected, [ast.literal_eval(x) for x in output])
    def test_migration_job_fails_with_invalid_topic(self):
        """Tests that the migration job logs an error and reports a
        validation_error in its output when the topic fails validation.
        """
        observed_log_messages = []
        def _mock_logging_function(msg):
            """Mocks logging.error()."""
            observed_log_messages.append(msg)
        # The topic model created will be invalid due to invalid language code.
        self.save_new_topic_with_subtopic_schema_v1(
            self.TOPIC_ID, self.albert_id, 'A name', 'abbrev', 'topic-two',
            'a name', 'description', 'Image.svg',
            '#C6DCDA', [], [], [], 2,
            language_code='invalid_language_code')
        job_id = (
            topic_jobs_one_off.TopicMigrationOneOffJob.create_new())
        topic_jobs_one_off.TopicMigrationOneOffJob.enqueue(job_id)
        with self.swap(logging, 'error', _mock_logging_function):
            self.process_and_flush_pending_tasks()
        self.assertEqual(
            observed_log_messages,
            ['Topic topic_id failed validation: Invalid language code: '
             'invalid_language_code'])
        output = topic_jobs_one_off.TopicMigrationOneOffJob.get_output(job_id)
        expected = [[u'validation_error',
                     [u'Topic topic_id failed validation: '
                      'Invalid language code: invalid_language_code']]]
        self.assertEqual(expected, [ast.literal_eval(x) for x in output])
class RemoveDeletedSkillsFromTopicOneOffJobTests(
test_utils.GenericTestBase):
ALBERT_EMAIL = 'albert@example.com'
ALBERT_NAME = 'albert'
TOPIC_ID = 'topic_id'
def setUp(self):
super(RemoveDeletedSkillsFromTopicOneOffJobTests, self).setUp()
# Setup user who will own the test topics.
self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
self.process_and_flush_pending_tasks()
self.rubrics = [
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
skill_domain.Rubric(
constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])]
def test_job_removes_deleted_uncategorized_skill_ids(self):
"""Tests that the RemoveDeletedSkillsFromTopicOneOffJob job removes
deleted uncategorized skills ids from the topic.
"""
valid_skill_1 = skill_domain.Skill.create_default_skill(
'valid_skill_1', 'A description', self.rubrics)
valid_skill_2 = skill_domain.Skill.create_default_skill(
'valid_skill_2', 'A description', self.rubrics)
valid_skill_3 = skill_domain.Skill.create_default_skill(
'valid_skill_3', 'A description', self.rubrics)
skill_services.save_new_skill(self.albert_id, valid_skill_1)
skill_services.save_new_skill(self.albert_id, valid_skill_2)
skill_services.save_new_skill(self.albert_id, valid_skill_3)
# Create a new topic that should not be affected by the
# job.
topic = topic_domain.Topic.create_default_topic(
self.TOPIC_ID, 'A name', 'abbrev', 'description')
topic.add_subtopic(1, 'A subtitle')
topic.add_uncategorized_skill_id('valid_skill_1')
topic.add_uncategorized_skill_id('valid_skill_2')
topic.add_uncategorized_skill_id('valid_skill_3')
topic.add_uncategorized_skill_id('deleted_skill_1')
topic.add_uncategorized_skill_id('deleted_skill_2')
topic.add_uncategorized_skill_id('deleted_skill_3')
topic.move_skill_id_to_subtopic(None, 1, 'valid_skill_3')
topic.move_skill_id_to_subtopic(None, 1, 'deleted_skill_3')
topic_services.save_new_topic(self.albert_id, topic)
# Pre-assert that all skills are added correctly.
self.assertEqual(
set(topic.uncategorized_skill_ids),
set([
'valid_skill_1',
'valid_skill_2',
'deleted_skill_1',
'deleted_skill_2'
]))
self.assertEqual(
set(topic.subtopics[0].skill_ids),
set(['valid_skill_3', 'deleted_skill_3']))
# Start RemoveDeletedSkillsFromTopicOneOffJob.
job_id = (
topic_jobs_one_off.RemoveDeletedSkillsFromTopicOneOffJob
.create_new())
topic_jobs_one_off.RemoveDeletedSkillsFromTopicOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_tasks()
# Assert that only valid skills remain after
# RemoveDeletedSkillsFromTopicOneOffJob.
updated_topic = topic_fetchers.get_topic_by_id(self.TOPIC_ID)
self.assertEqual(
updated_topic.uncategorized_skill_ids,
['valid_skill_1', 'valid_skill_2'])
self.assertEqual(
updated_topic.subtopics[0].skill_ids, ['valid_skill_3'])
output = (
topic_jobs_one_off.RemoveDeletedSkillsFromTopicOneOffJob
.get_output(job_id))
expected = [
[
u'Skill IDs deleted for topic topic_id:',
[u'[u\'deleted_skill_1\', u\'deleted_skill_2\','
' u\'deleted_skill_3\']']
],
[u'topic_processed', [u'Processed 1 topics.']]
]
self.assertEqual(expected, [ast.literal_eval(x) for x in output])
    def test_job_skips_deleted_topic(self):
        """Tests that RemoveDeletedSkillsFromTopicOneOffJob job skips
        deleted topic and does not attempt to remove uncategorized skills for
        skills that are deleted.
        """
        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'A name', 'abbrev', 'description')
        topic.add_uncategorized_skill_id('skill_1')
        topic.add_uncategorized_skill_id('skill_2')
        topic_services.save_new_topic(self.albert_id, topic)
        # Delete the topic before migration occurs.
        topic_services.delete_topic(
            self.albert_id, self.TOPIC_ID)
        # Ensure the topic is deleted.
        # NOTE(review): assertRaisesRegexp is the deprecated Python 2 name of
        # assertRaisesRegex; keep as-is for consistency with the rest of the
        # suite.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        # Start migration job on sample topic.
        job_id = (
            topic_jobs_one_off.RemoveDeletedSkillsFromTopicOneOffJob
            .create_new())
        topic_jobs_one_off.RemoveDeletedSkillsFromTopicOneOffJob.enqueue(
            job_id)
        # This running without errors indicates the deleted topic is
        # being ignored.
        self.process_and_flush_pending_tasks()
        # Ensure the topic is still deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        output = (
            topic_jobs_one_off.RemoveDeletedSkillsFromTopicOneOffJob
            .get_output(job_id))
        expected = [[u'topic_deleted',
                     [u'Encountered 1 deleted topics.']]]
        self.assertEqual(expected, [ast.literal_eval(x) for x in output])
class RegenerateTopicSummaryOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that regenerates TopicSummaryModel
    instances from their corresponding topic models.
    """
    ALBERT_EMAIL = 'albert@example.com'
    ALBERT_NAME = 'albert'
    def setUp(self):
        super(RegenerateTopicSummaryOneOffJobTests, self).setUp()
        self.signup(self.ALBERT_EMAIL, self.ALBERT_NAME)
        self.albert_id = self.get_user_id_from_email(self.ALBERT_EMAIL)
        self.TOPIC_ID = topic_services.get_new_topic_id()
        self.process_and_flush_pending_tasks()
    def test_job_skips_deleted_topic(self):
        """Tests that the regenerate summary job skips deleted topic."""
        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'A title', 'url-frag-one', 'description')
        topic_services.save_new_topic(self.albert_id, topic)
        topic_services.delete_topic(self.albert_id, self.TOPIC_ID)
        # Ensure the topic is deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        # Start migration job on sample topic.
        job_id = (
            topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.create_new())
        topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.enqueue(job_id)
        # This running without errors indicates the deleted topic is
        # being ignored.
        self.process_and_flush_pending_tasks()
        # Ensure the topic is still deleted.
        with self.assertRaisesRegexp(Exception, 'Entity .* not found'):
            topic_fetchers.get_topic_by_id(self.TOPIC_ID)
        output = topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.get_output(
            job_id)
        expected = [[u'topic_deleted',
                     [u'Encountered 1 deleted topics.']]]
        self.assertEqual(expected, [ast.literal_eval(x) for x in output])
    def test_job_converts_old_topic_summary(self):
        """Tests that the one off job creates the new summary correctly."""
        # Create the topic model (and its rights model) directly, bypassing
        # topic_services, so that no summary model exists before the job runs.
        topic_model = topic_models.TopicModel(
            id=self.TOPIC_ID,
            name='Topic name',
            abbreviated_name='Topic',
            url_fragment='topic-frag',
            thumbnail_bg_color='#C6DCDA',
            thumbnail_filename='topic.svg',
            canonical_name='topic name',
            description='Topic description',
            language_code='en',
            canonical_story_references=[],
            additional_story_references=[],
            uncategorized_skill_ids=[],
            subtopic_schema_version=feconf.CURRENT_SUBTOPIC_SCHEMA_VERSION,
            story_reference_schema_version=(
                feconf.CURRENT_STORY_REFERENCE_SCHEMA_VERSION),
            next_subtopic_id=1,
            subtopics=[]
        )
        commit_message = (
            'New topic created with name \'Topic name\'.')
        topic_models.TopicRightsModel(
            id=self.TOPIC_ID,
            manager_ids=[self.albert_id],
            topic_is_published=True
        ).commit(
            self.albert_id, 'Created new topic rights',
            [{'cmd': topic_domain.CMD_CREATE_NEW}])
        topic_model.commit(
            self.albert_id, commit_message, [{
                'cmd': topic_domain.CMD_CREATE_NEW,
                'name': 'Topic name'
            }])
        # The topic summary model isn't created yet.
        topic_summary_model = (
            topic_models.TopicSummaryModel.get(self.TOPIC_ID, strict=False))
        self.assertIsNone(topic_summary_model)
        # Start migration job.
        job_id = (
            topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.create_new())
        topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_tasks()
        # Verify the topic summary is created correctly.
        topic_summary_model = (
            topic_models.TopicSummaryModel.get(self.TOPIC_ID, strict=False))
        self.assertEqual(
            topic_summary_model.thumbnail_filename, 'topic.svg')
        self.assertEqual(
            topic_summary_model.thumbnail_bg_color, '#C6DCDA')
        output = topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.get_output(
            job_id)
        expected = [[u'topic_processed',
                     [u'Successfully processed 1 topics.']]]
        self.assertEqual(expected, [ast.literal_eval(x) for x in output])
    def test_regeneration_job_skips_invalid_topic(self):
        """Tests that the job logs an error and skips a topic whose
        summary cannot be computed.
        """
        observed_log_messages = []
        def _mock_get_topic_by_id(unused_topic_id):
            """Mocks get_topic_by_id()."""
            # Returning a plain string makes summary computation fail with
            # an AttributeError.
            return 'invalid_topic'
        def _mock_logging_function(msg, *args):
            """Mocks logging.error()."""
            observed_log_messages.append(msg % args)
        topic = topic_domain.Topic.create_default_topic(
            self.TOPIC_ID, 'A title', 'url-frag-two', 'description')
        topic_services.save_new_topic(self.albert_id, topic)
        get_topic_by_id_swap = self.swap(
            topic_fetchers, 'get_topic_by_id', _mock_get_topic_by_id)
        logging_exception_swap = self.swap(
            logging, 'exception', _mock_logging_function)
        with get_topic_by_id_swap, logging_exception_swap:
            job_id = (
                topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.create_new())
            topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.enqueue(job_id)
            self.process_and_flush_pending_tasks()
        output = topic_jobs_one_off.RegenerateTopicSummaryOneOffJob.get_output(
            job_id)
        self.assertEqual(
            observed_log_messages,
            [u'Failed to create topic summary %s: \'unicode\' '
             'object has no attribute \'canonical_story_references\''
             % topic.id])
        for message in output:
            self.assertRegexpMatches(
                message,
                'object has no attribute \'canonical_story_references\'')
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# __BEGIN_LICENSE__
# Copyright (c) 2009-2013, United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration. All
# rights reserved.
#
# The NGT platform is licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# __END_LICENSE__
'''
Tool for downloading IceBridge data
'''
import sys, os, re, subprocess, optparse, logging
import icebridge_common
# NOTE(review): this logging.info() call runs before any handler or level
# is configured, so with the default root logger the message is dropped.
logging.info('DEBUG')
logger = logging.getLogger(__name__)
# The path to the ASP python files
basepath = os.path.abspath(sys.path[0])
pythonpath = os.path.abspath(basepath + '/../Python') # for dev ASP
libexecpath = os.path.abspath(basepath + '/../libexec') # for packaged ASP
sys.path.insert(0, basepath) # prepend to Python path
sys.path.insert(0, pythonpath)
sys.path.insert(0, libexecpath)
import asp_system_utils
asp_system_utils.verify_python_version_is_supported()
# Prepend to system PATH
os.environ["PATH"] = libexecpath + os.pathsep + os.environ["PATH"]
os.environ["PATH"] = basepath + os.pathsep + os.environ["PATH"]
#------------------------------------------------------------------------------
# Constants
# Lidar data types available on the NSIDC server.
LIDAR_TYPES = ['atm1', 'atm2', 'lvis']
# Largest number of files fetched by a single curl invocation.
MAX_IN_ONE_CALL = 100 # when fetching in batches
def validateNavOrWipe(filename, logger):
    '''Check that a downloaded nav file contains real data; wipe it if not.

    The server sometimes returns an HTML error page instead of binary nav
    data. Detect that case, delete the bogus file, and report failure.

    Args:
        filename: Path to the nav file to validate.
        logger: Logger used to report a wiped file.

    Returns:
        True if the file exists and looks valid, False otherwise.
    '''
    if not os.path.exists(filename):
        return False
    try:
        # Only the first line is needed to recognize an HTML error page.
        # Use a context manager so the handle is closed before any wipe
        # (the original leaked the handle and shelled out to 'rm -f').
        with open(filename) as f:
            line = f.readline()
    except UnicodeDecodeError:
        # If it cannot be decoded, the file is likely in binary, hence valid.
        return True
    # Raw string avoids the invalid-escape warning for \s on modern Python.
    if re.match(r"^.*?DOCTYPE\s+HTML", line):
        logger.info("Bad nav data, will wipe: " + filename)
        os.remove(filename)
        return False
    return True
def checkFound(filename):
    '''Check if an HTML file has the 404 Not Found message.'''
    # A file that was never written cannot contain valid data.
    if not os.path.exists(filename):
        return False
    # Scan line by line; a line starting with the 404 title means the
    # server returned an error page rather than the requested folder.
    with open(filename, 'r') as handle:
        return not any(re.match("<title>404 Not Found", text)
                       for text in handle)
def checkIfUrlExists(url, baseCurlCmd):
    '''Return True if the given IceBridge folder URL is valid.

    Probing the URL status directly tends to time out (likely due to
    peculiarities of the NSIDC server). Hence, do a hack: fetch the page
    to a temporary local file and check whether it is a 404 page.

    Args:
        url: The folder URL to probe.
        baseCurlCmd: The curl command (with auth options) used to fetch it.

    Returns:
        True if the page was fetched and does not carry a 404 title.
    '''
    path = url.replace('/','_') # a temporary path
    # NOTE(review): the command runs through the shell; url is assumed to
    # contain no shell metacharacters.
    curlCmd = baseCurlCmd + ' ' + url + ' > ' + path
    p = subprocess.Popen(curlCmd, shell=True, universal_newlines=True)
    os.waitpid(p.pid, 0)
    found = checkFound(path)
    # Remove the temporary file regardless of the outcome.
    wipeCmd = "rm -f " + path
    os.system(wipeCmd)
    return found
def makeYearFolder(year, site):
    '''Generate part of the URL. Only used for images.'''
    # Folder names look like 2010_GR_NASA.
    return '_'.join([str(year), site, 'NASA'])
def makeDateFolder(year, month, day, ext, fileType):
    '''Generate part of the URL.'''
    # Raw jpegs use folders like 04222010a_raw; every other file type
    # uses the dotted form 2010.04.22a.
    if fileType == 'jpeg':
        return '%02d%02d%04d%s_raw' % (month, day, year, ext)
    return '%04d.%02d.%02d%s' % (year, month, day, ext)
def hasGoodLat(latitude, isSouth):
    '''Return true if latitude and isSouth parameters match.'''
    # Antarctic (south) flights have strictly negative latitudes,
    # Greenland (north) flights strictly positive ones.
    if isSouth:
        return latitude < 0
    return latitude > 0
def fetchAndParseIndexFileAux(isSouth, separateByLat, dayVal,
                              baseCurlCmd, folderUrl, path, fileType):
    '''Retrieve the index file for a folder of data and create
    a parsed version of it that contains frame number / filename pairs.

    Args:
        isSouth: True for Antarctic flights (negative latitudes).
        separateByLat: If True, filter out frames whose latitude does not
            match isSouth (for dates that mix AN and GR data).
        dayVal: 0 for the current flight day, 1 for next-day spillover.
        baseCurlCmd: Base curl command with authentication options.
        folderUrl: URL of the remote folder to index.
        path: Local path where the fetched index.html is written.
        fileType: One of 'jpeg', 'ortho', 'fireball', 'lvis', 'atm1', 'atm2'.

    Returns:
        A (frameDict, urlDict) pair mapping frame number to filename and
        to the folder URL it came from.

    Raises:
        Exception: If two non-lidar files share the same frame number.
    '''
    # Download the html file
    curlCmd = baseCurlCmd + ' ' + folderUrl + ' > ' + path
    logger.info(curlCmd)
    p = subprocess.Popen(curlCmd, shell=True, universal_newlines=True)
    os.waitpid(p.pid, 0)
    # Find all the file names in the index file and
    # dump them to a new index file
    logger.info('Extracting file name list from index.html file...')
    with open(path, 'r') as f:
        indexText = f.read()
    # Must wipe this html file. We fetch it too often in different
    # contexts. If not wiped, the code fails to work in some
    # very rare but real situations.
    if os.path.exists(path):
        os.remove(path)
    # Extract just the file names. Each fileType has its own filename
    # pattern on the server's index page.
    fileList = [] # ensure initialization
    if fileType == 'jpeg':
        fileList = re.findall(">[0-9_]*.JPG", indexText, re.IGNORECASE)
    if fileType == 'ortho':
        fileList = re.findall(">DMS\w*.tif<", indexText, re.IGNORECASE)
    if fileType == 'fireball':
        # Fireball DEMs
        fileList = re.findall(">IODMS\w*DEM.tif", indexText, re.IGNORECASE)
    if fileType == 'lvis':
        fileList = re.findall(">ILVIS\w+.TXT", indexText, re.IGNORECASE)
    if fileType == 'atm1':
        fileList = re.findall(">ILATM1B[0-9_]*.ATM4\w+.qi", indexText, re.IGNORECASE)
        # >ILATM1B_20111018_145455.ATM4BT4.qi
        # or >ILATM1B_20091016_165112.atm4cT3.qi
    if fileType == 'atm2':
        # Match ILATM1B_20160713_195419.ATM5BT5.h5
        fileList = re.findall(">ILATM1B[0-9_]*.ATM\w+.h5", indexText, re.IGNORECASE)
    # Get rid of '>' and '<'
    for fileIter in range(len(fileList)):
        fileList[fileIter] = fileList[fileIter].replace(">", "")
        fileList[fileIter] = fileList[fileIter].replace("<", "")
    # Some runs, eg, https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/IODMS1B.001/2015.09.24
    # have files for both GR and AN, with same frame number. Those need to be separated
    # by latitude. This is a problem only with orthoimages.
    badXmls = set()
    outputFolder = os.path.dirname(path)
    if separateByLat:
        # Fetch every frame's XML metadata just to read its latitude.
        allFilesToFetch = []
        allUrlsToFetch = []
        for filename in fileList:
            xmlFile = icebridge_common.xmlFile(filename)
            url = os.path.join(folderUrl, xmlFile)
            outputPath = os.path.join(outputFolder, xmlFile)
            allFilesToFetch.append(outputPath)
            allUrlsToFetch.append(url)
        dryRun = False
        icebridge_common.fetchFilesInBatches(baseCurlCmd, MAX_IN_ONE_CALL,
                                             dryRun, outputFolder,
                                             allFilesToFetch, allUrlsToFetch,
                                             logger)
        # Mark the bad ones
        for xmlFile in allFilesToFetch:
            latitude = icebridge_common.parseLatitude(xmlFile)
            isGood = hasGoodLat(latitude, isSouth)
            if not isGood:
                badXmls.add(xmlFile)
    elif (fileType == 'ortho' or fileType == 'fireball'):
        # Sometimes there is a large gap in the timestamp. That means orthoimages
        # from previous day are spilling over. If dayVal is 0, we must ignore
        # the spillover images. If dayVal is 1, we must keep the spillover images
        # and igore the others.
        list1 = []
        list2 = []
        isBigGap = False
        prevStamp = -1
        for filename in fileList:
            [imageDateString, imageTimeString] = icebridge_common.parseTimeStamps(filename)
            currStamp = float(imageTimeString)/1000000.0 # hours
            if prevStamp < 0:
                list1.append(filename)
                prevStamp = currStamp
                continue
            # Note that once isBigGap becomes true, it stays true
            # even when the gap gets small again
            if currStamp - prevStamp >= 6: # six hour gap is a lot
                isBigGap = True
            if not isBigGap:
                list1.append(filename)
            else:
                list2.append(filename)
            prevStamp = currStamp # for next iteration
        if isBigGap:
            if dayVal == 0:
                fileList = list2[:] # current day
            else:
                fileList = list1[:] # spillover from prev day
    # For each entry that matched the regex, record: the frame number and the file name.
    frameDict = {}
    urlDict = {}
    badFiles = []
    for filename in fileList:
        if len(badXmls) > 0:
            # Skip frames whose latitude did not match (separateByLat case).
            xmlFile = os.path.join(outputFolder, icebridge_common.xmlFile(filename))
            if xmlFile in badXmls:
                continue
        frame = icebridge_common.getFrameNumberFromFilename(filename)
        if frame in frameDict.keys():
            # The same frame must not occur twice.
            if fileType not in LIDAR_TYPES:
                logger.error("Error: Found two file names with same frame number: " + \
                             frameDict[frame] + " and " + filename)
                badFiles.append(filename)
                badFiles.append(frameDict[frame])
        # note that folderUrl can vary among orthoimages, as sometimes
        # some of them are in a folder for the next day.
        frameDict[frame] = filename
        urlDict[frame] = folderUrl
    # Wipe them all, to be sorted later
    for badFile in badFiles:
        if os.path.exists(badFile):
            logger.info("Deleting: " + badFile)
            os.remove(badFile)
        xmlFile = icebridge_common.xmlFile(badFile)
        if os.path.exists(xmlFile):
            logger.info("Deleting: " + xmlFile)
            os.remove(xmlFile)
    if len(badFiles) > 0:
        raise Exception("Found files with same frame number")
    return (frameDict, urlDict)
# These exist both in AN and GR, all mixed up, and have to separate by lat
def isInSeparateByLatTable(yyyymmdd):
    '''Return True if this flight date has data for both hemispheres mixed
    in the same server folder, which hence must be separated by latitude.

    Args:
        yyyymmdd: Flight date as a string, e.g. '20150923'.

    Returns:
        True for the known problematic dates, False otherwise.
    '''
    # Fix: the original had an empty docstring and a stray trailing
    # semicolon. The set of dates is unchanged.
    return yyyymmdd in ('20150923', '20150924', '20151004', '20151005',
                        '20151019', '20151020', '20151021', '20151022')
def twoFlightsInOneDay(site, yyyymmdd):
    '''Return true if there are two flights in one day.'''
    # The known split flights are GR_20100422a/GR_20100422b and
    # GR_20170725a/GR_20170725b; both are Greenland dates.
    return site == 'GR' and yyyymmdd in ('20100422', '20170725')
def getFolderUrl(yyyymmdd, year, month, day,
                 dayInc, # if to add one to the day
                 site, fileType):
    '''Get full URL to the location where the files are kept.

    Args:
        yyyymmdd: Flight date string; may carry a ninth character with a
            flight letter, e.g. 20100422a.
        year, month, day: The flight date as integers.
        dayInc: Amount (0 or 1) to add to the day, for runs whose data
            spills into the next day's folder.
        site: 'GR' (Greenland) or 'AN' (Antarctica).
        fileType: One of 'nav', 'jpeg', 'ortho', 'fireball', 'atm1',
            'atm2', 'lvis'.

    Returns:
        The folder URL as a string.

    Raises:
        Exception: If fileType is not one of the recognized values.
    '''
    # Note that yyyymmdd can equal 20100422a.
    ext = ''
    if len(yyyymmdd) == 9:
        ext = yyyymmdd[8]
    if fileType == 'nav':
        # This is the simplest, usually one file per flight.
        base = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE_FTP/IPAPP1B_GPSInsCorrected_v01'
        yearFolder = makeYearFolder(year, site)
        folderUrl = os.path.join(base, yearFolder)
        return folderUrl
    if fileType == 'jpeg':
        # If yyyymmdd is 20100422, put a or b depending on dayVal
        if twoFlightsInOneDay(site, yyyymmdd):
            if dayInc == 0:
                ext = 'a'
            else:
                ext = 'b'
            dayInc = 0
        #base = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE_FTP/IODMS0_DMSraw_v01'
        base = 'https://daacdata.apps.nsidc.org/pub/DATASETS/ICEBRIDGE/IODMS0_DMSraw_v01'
        yearFolder = makeYearFolder(year, site)
        dateFolder = makeDateFolder(year, month, day + dayInc, ext, fileType)
        folderUrl = os.path.join(base, yearFolder, dateFolder)
        return folderUrl
    # The other types share more formatting
    if twoFlightsInOneDay(site, yyyymmdd):
        dayInc = 0 # for this particular day, one should not look at the next day
    if fileType == 'ortho':
        base = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/IODMS1B.001'
    elif fileType == 'fireball':
        base = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/IODMS3.001'
    elif fileType == 'atm1':
        base = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/ILATM1B.001/'
    elif fileType == 'atm2':
        base = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/ILATM1B.002/'
    elif fileType == 'lvis':
        base = 'https://n5eil01u.ecs.nsidc.org/ICEBRIDGE/ILVIS2.001/'
    else:
        # Bug fix: the original did raise("Unknown type: ..."), i.e. raised
        # a plain string, which is a TypeError in Python 3 (exceptions must
        # derive from BaseException). Raise a proper Exception instead.
        raise Exception("Unknown type: " + fileType)
    dateFolder = makeDateFolder(year, month, day + dayInc, ext, fileType)
    folderUrl = os.path.join(base, dateFolder)
    return folderUrl
def fetchAndParseIndexFile(options, isSouth, baseCurlCmd, outputFolder):
    '''Create a list of all files that must be fetched unless done already.

    Builds (or reuses) the parsed CSV index of frame number -> filename
    for the requested data type, possibly merging the current day's index
    with the next day's when data spills over.

    Args:
        options: Parsed command-line options; this function may mutate
            options.type (lidar auto-detection) and options.allFrames.
        isSouth: True for Antarctic flights.
        baseCurlCmd: Base curl command with authentication options.
        outputFolder: Local folder where index files are written.

    Returns:
        Path to the parsed CSV index file.

    Raises:
        Exception: If lidar data is required but no candidate URL has a
            matching latitude (unless options.ignoreMissingLidar).
    '''
    # For AN 20091112, etc, some of the ortho images are stored at the
    # beginning of the next day's flight. Need to sort this out, and
    # it is tricky. More comments within the code.
    fetchNextDay = True
    separateByLat = (options.type == 'ortho' and isInSeparateByLatTable(options.yyyymmdd))
    if separateByLat:
        # Here we won't fetch the next day, we will just separate by latitude within
        # a given day
        fetchNextDay = False
    orthoOrFireball = ( (options.type == 'ortho') or (options.type == 'fireball') )
    if fetchNextDay:
        # Normally we fetch for next day only for ortho or fireball. However,
        # for one single special flight, we do it for jpeg too, as then
        # the jpegs are also split.
        if orthoOrFireball or \
           ((options.type == 'jpeg') and twoFlightsInOneDay(options.site, options.yyyymmdd)):
            fetchNextDay = True
        else:
            fetchNextDay = False
    # If we need to parse the next flight day as well, as expected in some runs,
    # we will fetch two html files, but create a single index out of them.
    dayVals = [0]
    if fetchNextDay:
        dayVals.append(1)
    indexPath = icebridge_common.htmlIndexFile(outputFolder)
    currIndexPath = indexPath
    parsedIndexPath = icebridge_common.csvIndexFile(outputFolder)
    if options.refetchIndex:
        os.system('rm -f ' + indexPath)
        os.system('rm -f ' + parsedIndexPath)
    # Reuse a previously parsed index if present and non-empty.
    if icebridge_common.fileNonEmpty(parsedIndexPath):
        logger.info('Already have the index file ' + parsedIndexPath + ', keeping it.')
        return parsedIndexPath
    frameDict = {}
    urlDict = {}
    # We need the list of jpeg frames. Sometimes when fetching ortho images,
    # and we have to fetch from the next day, don't fetch unless
    # in the jpeg index.
    if len(dayVals) > 1 and options.type != 'jpeg':
        jpegFolder = icebridge_common.getJpegFolder(os.path.dirname(outputFolder))
        jpegIndexPath = icebridge_common.csvIndexFile(jpegFolder)
        (jpegFrameDict, jpegUrlDict) = icebridge_common.readIndexFile(jpegIndexPath)
    orthoStamp = {}
    if options.type == 'fireball':
        # This is a bugfix. Ensure that the fireball DEM has not just
        # the same frame number, but also same timestamp as the ortho.
        orthoFolder = icebridge_common.getOrthoFolder(os.path.dirname(outputFolder))
        orthoIndexPath = icebridge_common.csvIndexFile(orthoFolder)
        (orthoFrameDict, orthoUrlDict) = icebridge_common.readIndexFile(orthoIndexPath)
        for frame in sorted(orthoFrameDict.keys()):
            filename = orthoFrameDict[frame]
            [imageDateString, imageTimeString] = icebridge_common.parseTimeStamps(filename)
            orthoStamp[frame] = imageTimeString
    for dayVal in dayVals:
        if len(dayVals) > 1:
            currIndexPath = indexPath + '.day' + str(dayVal)
            if options.refetchIndex:
                os.system('rm -f ' + currIndexPath)
        # Find folderUrl which contains all of the files
        if options.type in LIDAR_TYPES:
            options.allFrames = True # For lidar, always get all the frames!
            # For lidar, the data can come from one of three sources.
            # Unfortunately sometimes there is more than one source, and then
            # we need to pick by latitude.
            folderUrls = []
            lidar_types = []
            for lidar in LIDAR_TYPES:
                folderUrl = getFolderUrl(options.yyyymmdd, options.year, options.month,
                                         options.day, dayVal, # note here the dayVal
                                         options.site, lidar)
                logger.info('Checking lidar URL: ' + folderUrl)
                if checkIfUrlExists(folderUrl, baseCurlCmd):
                    logger.info('Found match with lidar type: ' + lidar)
                    folderUrls.append(folderUrl)
                    lidar_types.append(lidar)
            if len(folderUrls) == 0:
                logger.info('WARNING: Could not find any lidar data for the given date!')
            elif len(folderUrls) == 1:
                # Unique solution
                folderUrl = folderUrls[0]
                options.type = lidar_types[0]
            elif len(folderUrls) >= 2:
                # Multiple solutions. Pick the good one by latitude.
                logger.info("Multiples URLs to search: " + " ".join(folderUrls))
                count = -1
                isGood = False
                for folderUrl in folderUrls:
                    count += 1
                    (localFrameDict, localUrlDict) = \
                        fetchAndParseIndexFileAux(isSouth,
                                                  separateByLat, dayVal,
                                                  baseCurlCmd, folderUrl,
                                                  currIndexPath,
                                                  lidar_types[count])
                    # Inspect only the first frame's XML to decide if this
                    # source's latitude matches the requested hemisphere.
                    for frame in sorted(localFrameDict.keys()):
                        filename = localFrameDict[frame]
                        xmlFile = icebridge_common.xmlFile(filename)
                        url = os.path.join(folderUrl, xmlFile)
                        # Download the file
                        curlCmd = baseCurlCmd + ' ' + url + ' > ' + xmlFile
                        logger.info(curlCmd)
                        p = subprocess.Popen(curlCmd, shell=True, universal_newlines=True)
                        os.waitpid(p.pid, 0)
                        latitude = icebridge_common.parseLatitude(xmlFile)
                        if os.path.exists(xmlFile): os.remove(xmlFile)
                        if hasGoodLat(latitude, isSouth):
                            isGood = True
                            options.type = lidar_types[count]
                            logger.info("Good latitude " + str(latitude) + ", will use " +
                                        folderUrl + " of type " + lidar_types[count])
                        else:
                            logger.info("Bad latitude " + str(latitude) + ", will not use " +
                                        folderUrl + " of type " + lidar_types[count])
                        # Stop at first file no matter what
                        break
                    if isGood:
                        break
                if not isGood:
                    if options.type in LIDAR_TYPES and options.ignoreMissingLidar:
                        logger.info("No lidar. None of these URLs are good: " +
                                    " ".join(folderUrls))
                    else:
                        raise Exception("None of these URLs are good: " +
                                        " ".join(folderUrls))
        else: # Other cases are simpler
            folderUrl = getFolderUrl(options.yyyymmdd, options.year, options.month,
                                     options.day, dayVal, # note here the dayVal
                                     options.site, options.type)
            logger.info('Fetching from URL: ' + folderUrl)
            (localFrameDict, localUrlDict) = \
                fetchAndParseIndexFileAux(isSouth,
                                          separateByLat, dayVal,
                                          baseCurlCmd, folderUrl,
                                          currIndexPath, options.type)
        # Append to the main index
        for frame in sorted(localFrameDict.keys()):
            if options.type == 'fireball':
                # This is a bugfix. Ensure that the fireball DEM has not just
                # the same frame number, but also same timestamp as the ortho.
                # Otherwise we may accidentally getting one from next day.
                [imageDateString, imageTimeString] = \
                    icebridge_common.parseTimeStamps(localFrameDict[frame])
                if frame not in orthoStamp:
                    #logger.info("Missing ortho for fireball: " + localFrameDict[frame])
                    continue
                if abs(int(imageTimeString) - int(orthoStamp[frame])) > 1000:
                    # Apparently a tolerance is needed. Use 10 seconds, so the number 1000.
                    #logger.info("Will not use fireball DEM whose timestamp differs from ortho.")
                    #logger.info("Fireball is: " + localFrameDict[frame])
                    #logger.info("Ortho is: " + orthoFrameDict[frame])
                    continue
            # Fetch from next day, unless already have a value. And don't fetch
            # frames not in the jpeg index.
            if len(dayVals) > 1 and options.type != 'jpeg':
                if not frame in jpegFrameDict.keys(): continue
            if frame in frameDict.keys(): continue
            frameDict[frame] = localFrameDict[frame]
            urlDict[frame] = localUrlDict[frame]
    # Write the combined index file
    icebridge_common.writeIndexFile(parsedIndexPath, frameDict, urlDict)
    return parsedIndexPath
def lidarFilesInRange(lidarDict, lidarFolder, startFrame, stopFrame):
    '''Fetch only lidar files for the given frame range. Do that as follows.
    For each ortho frame in [startFrame, stopFrame], find the lidar
    file with the closest timestamp. Collect them all.
    Add the two neighboring ones, to help with finding lidar pairs later.

    Args:
        lidarDict: Dict of lidar frame number -> lidar filename.
        lidarFolder: Local folder for lidar data; its parent holds the
            ortho index used to map image frames to timestamps.
        startFrame, stopFrame: Inclusive image frame range.

    Returns:
        A set of lidar filenames to fetch.
    '''
    lidarList = []
    for frame in sorted(lidarDict.keys()):
        lidarList.append(lidarDict[frame])
    # If we requested all frames, also get all the lidar files.
    if ((startFrame == icebridge_common.getSmallestFrame()) and
        (stopFrame == icebridge_common.getLargestFrame() ) ):
        minLidarIndex = 0
        maxLidarIndex = len(lidarList)-1
    else:
        # Start with an empty (inverted) index range and grow it below.
        minLidarIndex = len(lidarList)
        maxLidarIndex = 0
        # Build up a list of lidar files that match the requested input frames
        orthoFolder = icebridge_common.getOrthoFolder(os.path.dirname(lidarFolder))
        orthoIndexPath = icebridge_common.csvIndexFile(orthoFolder)
        (orthoFrameDict, orthoUrlDict) = icebridge_common.readIndexFile(orthoIndexPath)
        for frame in sorted(orthoFrameDict.keys()):
            if ((frame < startFrame) or (frame > stopFrame) ): continue
            orthoFrame = orthoFrameDict[frame]
            try:
                matchingLidar = icebridge_common.findMatchingLidarFileFromList(orthoFrame, lidarList)
            except:
                # Some image files don't have a matching lidar file, just keep going.
                continue
            # Widen the [min, max] index window to cover this match.
            for index in range(len(lidarList)):
                if lidarList[index] == matchingLidar:
                    if minLidarIndex > index:
                        minLidarIndex = index
                    if maxLidarIndex < index:
                        maxLidarIndex = index
        # We will fetch neighboring lidar files as well
        if minLidarIndex > 0:
            minLidarIndex = minLidarIndex -1
        if maxLidarIndex + 1 < len(lidarList):
            maxLidarIndex = maxLidarIndex + 1
    lidarsToFetch = set()
    if lidarList:
        for index in range(minLidarIndex, maxLidarIndex+1): # Fetch only the requested lidar files.
            lidarsToFetch.add(lidarList[index])
    return lidarsToFetch
def fetchNavData(options, outputFolder):
    '''Fetch all the nav data for a flight.

    Returns:
        0 on success, 1 on failure (an error count, not a boolean).
    '''
    success = False
    # The storage convention for these is very easy!
    # - A few dates have two files instead of one.
    # NOTE(review): dayInc is passed as False here, which equals 0.
    folderUrl = getFolderUrl(options.yyyymmdd, options.year, options.month,
                             options.day, False,
                             options.site, options.type)
    filename = 'sbet_' + options.yyyymmdd + '.out'
    filenameA = 'sbet_' + options.yyyymmdd + 'a.out'
    filenameB = 'sbet_' + options.yyyymmdd + 'b.out'
    # Check which urls are accurate for this file
    # This is not robust enough, as it can return good status even when the data is missing.
    # So comment it out. Rather fetch all files and check them later.
    #url = folderUrl + filename
    #if checkIfUrlExists(url):
    #    fileList = [filename]
    #else:
    #    fileList = [filenameA, filenameB]
    fileList = [filename, filenameA, filenameB]
    if options.refetchNav:
        cmd = "rm -f " + os.path.join(outputFolder, "sbet_*")
        print(cmd)
        os.system(cmd)
    # Download the files
    for f in fileList:
        url = os.path.join(folderUrl, f)
        outputPath = os.path.join(outputFolder, f)
        # TODO: How to handle refetch?
        # Skip files that are already present and valid.
        if validateNavOrWipe(outputPath, logger):
            success = True
            continue
        # This times out, so avoid it
        #if not checkIfUrlExists(url):
        #    continue
        ans = icebridge_common.fetchFile(url, outputPath)
        if not ans:
            logger.info("Bad url: " + url)
            continue
        if validateNavOrWipe(outputPath, logger):
            success = True
    # Success means at least one of the candidate files validated.
    if success:
        return 0
    return 1
def doFetch(options, outputFolder):
'''The main fetch function.
Returns the number of failures.'''
# Verify that required files exist
home = os.path.expanduser("~")
if not (os.path.exists(home+'/.netrc') and os.path.exists(home+'/.urs_cookies')):
logger.error('Missing a required authentication file! See instructions here:\n' +
' https://nsidc.org/support/faq/what-options-are-available-bulk-' +
'downloading-data-https-earthdata-login-enabled')
return -1
curlPath = asp_system_utils.which("curl")
curlOpts = ' -n -L '
cookiePaths = ' -b ~/.urs_cookies -c ~/.urs_cookies '
baseCurlCmd = curlPath + curlOpts + cookiePaths
logger.info('Creating output folder: ' + outputFolder)
os.system('mkdir -p ' + outputFolder)
isSouth = (options.site == 'AN')
if options.type == 'nav': # Nav fetching is much less complicated
return fetchNavData(options, outputFolder)
parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd, outputFolder)
if not icebridge_common.fileNonEmpty(parsedIndexPath):
# Some dirs are weird, both images, fireball dems, and ortho.
# Just accept whatever there is, but with a warning.
logger.info('Warning: Missing index file: ' + parsedIndexPath)
# Store file information in a dictionary
# - Keep track of the earliest and latest frame
logger.info('Reading file list from ' + parsedIndexPath)
try:
(frameDict, urlDict) = icebridge_common.readIndexFile(parsedIndexPath)
except:
# We probably ran into old format index file. Must refetch.
logger.info('Could not read index file. Try again.')
options.refetchIndex = True
parsedIndexPath = fetchAndParseIndexFile(options, isSouth, baseCurlCmd, outputFolder)
(frameDict, urlDict) = icebridge_common.readIndexFile(parsedIndexPath)
if options.stopAfterIndexFetch:
return 0
isLidar = (options.type in LIDAR_TYPES)
allFrames = sorted(frameDict.keys())
if not isLidar:
# The lidar frames use a totally different numbering than the image/ortho/dem frames
firstFrame = icebridge_common.getLargestFrame() # start big
lastFrame = icebridge_common.getSmallestFrame() # start small
for frameNumber in allFrames:
if frameNumber < firstFrame:
firstFrame = frameNumber
if frameNumber > lastFrame:
lastFrame = frameNumber
if options.allFrames:
options.startFrame = firstFrame
options.stopFrame = lastFrame
if isLidar:
# Based on image frames, determine which lidar frames to fetch.
if options.ignoreMissingLidar and len(frameDict.keys()) == 0:
# Nothing we can do if this run has no lidar and we are told to continue
logger.info("Warning: missing lidar, but continuing.")
lidarsToFetch = set()
else:
lidarsToFetch = lidarFilesInRange(frameDict, outputFolder,
options.startFrame, options.stopFrame)
# There is always a chance that not all requested frames are available.
# That is particularly true for Fireball DEMs. Instead of failing,
# just download what is present and give a warning.
if options.startFrame not in frameDict and not isLidar:
logger.info("Warning: Frame " + str(options.startFrame) +
" is not found in this flight.")
if options.stopFrame and (options.stopFrame not in frameDict) and not isLidar:
logger.info("Warning: Frame " + str(options.stopFrame) +
" is not found in this flight.")
allFilesToFetch = [] # Files that we will fetch, relative to the current dir.
allUrlsToFetch = [] # Full url of each file.
# Loop through all found frames within the provided range
currentFileCount = 0
lastFrame = ""
if len(allFrames) > 0:
lastFrame = allFrames[len(allFrames)-1]
hasTfw = (options.type == 'fireball')
hasXml = ( isLidar or (options.type == 'ortho') or hasTfw )
numFetched = 0
skipCount = 0
for frame in allFrames:
# Skip frame outside of range
if isLidar:
if frameDict[frame] not in lidarsToFetch:
continue
else:
if ((frame < options.startFrame) or (frame > options.stopFrame) ):
continue
# Handle the frame skip option
if options.frameSkip > 0:
if skipCount < options.frameSkip:
skipCount += 1
continue
skipCount = 0
filename = frameDict[frame]
# Some files have an associated xml file. Fireball DEMs also have a tfw file.
currFilesToFetch = [filename]
if hasXml:
currFilesToFetch.append(icebridge_common.xmlFile(filename))
if hasTfw:
currFilesToFetch.append(icebridge_common.tfwFile(filename))
for filename in currFilesToFetch:
url = os.path.join(urlDict[frame], filename)
outputPath = os.path.join(outputFolder, filename)
allFilesToFetch.append(outputPath)
allUrlsToFetch.append(url)
# Restrict lidar fetch amount according to the parameter
if (isLidar and options.maxNumLidarToFetch > 0 and
len(allFilesToFetch) > options.maxNumLidarToFetch):
# Ensure an even number, to fetch both the lidar file and its xml
if options.maxNumLidarToFetch % 2 == 1:
options.maxNumLidarToFetch += 1
allFilesToFetch = allFilesToFetch[0:options.maxNumLidarToFetch]
allUrlsToFetch = allUrlsToFetch [0:options.maxNumLidarToFetch]
icebridge_common.fetchFilesInBatches(baseCurlCmd, MAX_IN_ONE_CALL, options.dryRun,
outputFolder,
allFilesToFetch, allUrlsToFetch, logger)
# Fetch from disk the set of already validated files, if any
validFilesList = icebridge_common.validFilesList(os.path.dirname(outputFolder),
options.startFrame, options.stopFrame)
validFilesSet = set()
validFilesSet = icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
numInitialValidFiles = len(validFilesSet)
# Verify that all files were fetched and are in good shape
failedFiles = []
for outputPath in allFilesToFetch:
if options.skipValidate:
continue
if not icebridge_common.fileNonEmpty(outputPath):
logger.info('Missing file: ' + outputPath)
failedFiles.append(outputPath)
continue
if icebridge_common.hasImageExtension(outputPath):
if False:
# This check is just so slow. Turn it off for now.
# This will impact only the validation of jpegs,
# as the other files can be validated via the checksum.
# Jpegs will be validated when converting them to 1 band images
if outputPath in validFilesSet and os.path.exists(outputPath):
#logger.info('Previously validated: ' + outputPath) # verbose
continue
else:
if not icebridge_common.isValidImage(outputPath):
logger.info('Found an invalid image. Will wipe it: ' + outputPath)
if os.path.exists(outputPath): os.remove(outputPath)
failedFiles.append(outputPath)
continue
else:
logger.info('Valid image: ' + outputPath)
validFilesSet.add(outputPath) # mark it as validated
# Sanity check: XML files must have the right latitude.
if icebridge_common.fileExtension(outputPath) == '.xml':
if outputPath in validFilesSet and os.path.exists(outputPath):
#logger.info('Previously validated: ' + outputPath) #verbose
continue
else:
if os.path.exists(outputPath):
try:
latitude = icebridge_common.parseLatitude(outputPath)
logger.info('Valid file: ' + outputPath)
validFilesSet.add(outputPath) # mark it as validated
except:
# Corrupted file
logger.info("Failed to parse latitude, will wipe: " + outputPath)
if os.path.exists(outputPath): os.remove(outputPath)
failedFiles.append(outputPath)
# On a second thought, don't wipe files with wrong latitude, as
# next time we run fetch we will have to fetch them again.
# Hopefully they will be ignored.
#isGood = hasGoodLat(latitude, isSouth)
#if not isGood:
# logger.info("Wiping XML file " + outputPath + " with bad latitude " + \
# str(latitude))
# os.remove(outputPath)
# imageFile = icebridge_common.xmlToImage(outputPath)
# if os.path.exists(imageFile):
# logger.info("Wiping TIF file " + imageFile + " with bad latitude " + \
# str(latitude))
# os.remove(imageFile)
    # Verify the checksum
if hasXml and len(outputPath) >= 4 and outputPath[-4:] != '.xml' \
and outputPath[-4:] != '.tfw':
if outputPath in validFilesSet and os.path.exists(outputPath):
#logger.info('Previously validated: ' + outputPath) # verbose
continue
else:
isGood = icebridge_common.hasValidChkSum(outputPath, logger)
if not isGood:
xmlFile = icebridge_common.xmlFile(outputPath)
logger.info('Found invalid data. Will wipe: ' + outputPath + ' ' + xmlFile)
if os.path.exists(outputPath): os.remove(outputPath)
if os.path.exists(xmlFile): os.remove(xmlFile)
failedFiles.append(outputPath)
failedFiles.append(xmlFile)
continue
else:
logger.info('Valid file: ' + outputPath)
validFilesSet.add(outputPath)
if hasTfw and icebridge_common.fileExtension(outputPath) == '.tfw':
if outputPath in validFilesSet and os.path.exists(outputPath):
#logger.info('Previously validated: ' + outputPath)
continue
else:
isGood = icebridge_common.isValidTfw(outputPath, logger)
if not isGood:
xmlFile = icebridge_common.xmlFile(outputPath)
logger.info('Found invalid tfw. Will wipe: ' + outputPath + ' ' + xmlFile)
if os.path.exists(outputPath): os.remove(outputPath)
if os.path.exists(xmlFile): os.remove(xmlFile)
failedFiles.append(outputPath)
failedFiles.append(xmlFile)
continue
else:
logger.info('Valid tfw file: ' + outputPath)
validFilesSet.add(outputPath)
# Write to disk the list of validated files, but only if new
# validations happened. First re-read that list, in case a
# different process modified it in the meantime, such as if two
# managers are running at the same time.
numFinalValidFiles = len(validFilesSet)
if numInitialValidFiles != numFinalValidFiles:
validFilesSet = \
icebridge_common.updateValidFilesListFromDisk(validFilesList, validFilesSet)
icebridge_common.writeValidFilesList(validFilesList, validFilesSet)
numFailed = len(failedFiles)
if numFailed > 0:
logger.info("Number of files that could not be processed: " + str(numFailed))
return numFailed
def main(argsIn):
    """Entry point: parse options, then fetch IceBridge data with retries.

    Makes up to 10 fetch attempts, returning 0 as soon as one succeeds
    and -1 as soon as an attempt makes no progress over the previous one.
    """
    # Command line parsing
    try:
        usage = "usage: fetch_icebridge_data.py [options] output_folder"
        parser = optparse.OptionParser(usage=usage)
        # The flight date may be given either as separate --year/--month/--day
        # values or as a single --yyyymmdd string (which overrides the former).
        parser.add_option("--year", dest="year", type='int', default=None,
                          help="Year of the flight to fetch data for.")
        parser.add_option("--month", dest="month", type='int', default=None,
                          help="Month of the flight to fetch data for.")
        parser.add_option("--day", dest="day", type='int', default=None,
                          help="Day of the flight to fetch data for.")
        parser.add_option("--yyyymmdd", dest="yyyymmdd", default=None,
                          help="Specify the year, month, and day in one YYYYMMDD string.")
        parser.add_option("--site", dest="site", default=None,
                          help="Name of the location of the images (AN or GR)")
        parser.add_option("--start-frame", dest="startFrame", type='int',
                          default=icebridge_common.getSmallestFrame(),
                          help="Frame number or start of frame sequence")
        parser.add_option("--stop-frame", dest="stopFrame", type='int',
                          default=icebridge_common.getLargestFrame(),
                          help="End of frame sequence to download.")
        parser.add_option("--all-frames", action="store_true", dest="allFrames",
                          default=False,
                          help="Fetch all frames for this flight.")
        parser.add_option("--skip-validate", action="store_true", dest="skipValidate",
                          default=False,
                          help="Skip input data validation.")
        parser.add_option("--ignore-missing-lidar", action="store_true", dest="ignoreMissingLidar",
                          default=False,
                          help="Keep going if the lidar is missing.")
        parser.add_option("--frame-skip", dest="frameSkip", type='int', default=0,
                          help="Skip this many frames between downloads.")
        parser.add_option("--dry-run", action="store_true", dest="dryRun",
                          default=False,
                          help="Just print the image/ortho/fireball download commands.")
        parser.add_option("--refetch-index", action="store_true", dest="refetchIndex",
                          default=False,
                          help="Force refetch of the index file.")
        parser.add_option("--refetch-nav", action="store_true", dest="refetchNav",
                          default=False,
                          help="Force refetch of the nav file.")
        parser.add_option("--stop-after-index-fetch", action="store_true",
                          dest="stopAfterIndexFetch", default=False,
                          help="Stop after fetching the indices.")
        parser.add_option('--max-num-lidar-to-fetch', dest='maxNumLidarToFetch', default=-1,
                          type='int', help='The maximum number of lidar files to fetch. ' +
                          'This is used in debugging.')
        (options, args) = parser.parse_args(argsIn)

        if len(args) != 1:
            logger.info('Error: Missing output folder.\n' + usage)
            return -1
        outputFolder = os.path.abspath(args[0])

        # TODO: Restore "type" input parameter so that outside users who do not use
        # our folder convention can use this tool.
        options.type = icebridge_common.folderToType(outputFolder)
        if options.type == 'lidar':
            options.type = LIDAR_TYPES[0]
        print ('Detected type: ' + options.type)

        # Handle unified date option
        if options.yyyymmdd:
            options.year  = int(options.yyyymmdd[0:4])
            options.month = int(options.yyyymmdd[4:6])
            options.day   = int(options.yyyymmdd[6:8])

        if not options.stopFrame:
            options.stopFrame = options.startFrame

        # Error checking
        if (not options.year) or (not options.month) or (not options.day):
            logger.error('Error: year, month, and day must be provided.\n' + usage)
            return -1

        # Ortho and Fireball DEM files don't need this information to find them.
        if (options.type == 'jpeg') and not (options.site == 'AN' or options.site == 'GR'):
            logger.error('Error, site must be AN or GR for images.\n' + usage)
            return -1

        KNOWN_TYPES = ['jpeg', 'ortho', 'fireball', 'nav'] + LIDAR_TYPES
        if not (options.type.lower() in KNOWN_TYPES):
            logger.error('Error, type must be image, ortho, fireball, or a lidar type.\n' + usage)
            return -1

    except optparse.OptionError as msg:
        raise Exception(msg)

    # Make several attempts. Stop if there is no progress.
    numPrevFailed = -1
    numFailed = -1
    for attempt in range(10):
        numFailed = doFetch(options, outputFolder)

        if numFailed == 0:
            return 0  # Success

        if numFailed == numPrevFailed:
            logger.info("No progress in attempt %d" % (attempt + 1))
            return -1

        # Try again
        logger.info("Failed to fetch all in attempt %d, will try again.\n" % (attempt + 1))
        numPrevFailed = numFailed

    return -1  # We should not come all the way to here
# Script entry point: forward the CLI arguments (minus the program name)
# to main() and propagate its return code as the process exit status.
if __name__ == "__main__":
    sys.exit(main(sys.argv[1:]))
|
|
#!/usr/bin/env python3
import os
import sys
import socket
import time
try:
from urllib.parse import urlparse # Python 3
except ImportError:
from urlparse import urlparse # Python 2
import posixpath
import json
from hashlib import sha1
from base64 import b64encode
import requests
import classad
TOKEN_DIR_ENV_NAME = '_CONDOR_CREDS'
TOKEN_FILE_EXT = '.use'
DEFAULT_TIMEOUT = 30
BOX_PLUGIN_VERSION = '1.1.0'
BOX_API_VERSION = '2.0'
BOX_API_BASE_URL = 'https://api.box.com/' + BOX_API_VERSION
# Two methods of uploading, depending on file size:
# https://developer.box.com/guides/uploads/direct/
# https://developer.box.com/guides/uploads/chunked/
DIRECT_UPLOAD_CUTOFF_MB = 35
def print_help(stream = sys.stderr):
    """Write this plugin's command-line usage message to *stream*.

    Defaults to stderr, matching the usual convention for usage errors.
    """
    # Fixed user-visible typo: "capablities" -> "capabilities".
    help_msg = '''Usage: {0} -infile <input-filename> -outfile <output-filename>
       {0} -classad

Options:
  -classad                    Print a ClassAd containing the capabilities of this
                              file transfer plugin.
  -infile <input-filename>    Input ClassAd file
  -outfile <output-filename>  Output ClassAd file
  -upload
'''
    stream.write(help_msg.format(sys.argv[0]))
def print_capabilities():
    """Emit this plugin's capabilities as an old-style ClassAd on stdout."""
    ad = classad.ClassAd({
        'MultipleFileSupport': True,
        'PluginType': 'FileTransfer',
        'SupportedMethods': 'box',
        'Version': BOX_PLUGIN_VERSION,
    })
    sys.stdout.write(ad.printOld())
def parse_args():
    """Parse sys.argv for the file transfer plugin.

    The only accepted shapes are:
        <plugin> -classad
        <plugin> [-upload] -infile <file> -outfile <file>
    with -infile/-outfile allowed in either order. Anything else prints
    the usage message and exits with status 1. (argparse may be missing
    on old platforms and optparse cannot express these flags, hence the
    hand-rolled parser; the rigid input keeps it simple.)
    """
    argv = sys.argv
    if len(argv) not in (2, 5, 6):
        print_help()
        sys.exit(1)

    # -classad short-circuits everything else.
    if len(argv) == 2 and argv[1] == '-classad':
        print_capabilities()
        sys.exit(0)

    # Strip the optional -upload flag, remembering whether it was present.
    is_upload = '-upload' in argv[1:]
    if is_upload:
        argv.remove('-upload')

    # After stripping -upload, exactly "-infile X -outfile Y" (either
    # order) must remain, with the flags in positions 1 and 3.
    flags = ('-infile', '-outfile')
    well_formed = (
        ('-infile' in argv[1:]) and
        ('-outfile' in argv[1:]) and
        (argv[1] in flags) and
        (argv[3] in flags) and
        (len(argv) == 5))
    if not well_formed:
        print_help()
        sys.exit(1)

    infile = None
    outfile = None
    try:
        for position, token in enumerate(argv):
            if position == 0:
                continue
            if token == '-infile':
                infile = argv[position + 1]
            elif token == '-outfile':
                outfile = argv[position + 1]
    except IndexError:
        print_help()
        sys.exit(1)

    return {'infile': infile, 'outfile': outfile, 'upload': is_upload}
def get_token_name(url):
    """Derive the credential token name from a transfer URL's scheme.

    A combined scheme of the form ``handle+provider`` maps to the token
    name ``provider_handle``; a plain scheme is used unchanged.
    """
    scheme = url.split('://')[0]
    if '+' not in scheme:
        return scheme
    handle, provider = scheme.split('+')
    return '{0}_{1}'.format(provider, handle)
def get_token_path(token_name):
    """Locate the credential file for *token_name* in the job's cred dir.

    Raises KeyError when the _CONDOR_CREDS environment variable is not
    set, and IOError(ENOENT) when the token file itself is missing.
    """
    try:
        cred_dir = os.environ[TOKEN_DIR_ENV_NAME]
    except KeyError:
        raise KeyError("Required variable '{0}' was not found in job's environment".format(TOKEN_DIR_ENV_NAME))
    token_path = os.path.join(cred_dir, token_name + TOKEN_FILE_EXT)
    if not os.path.exists(token_path):
        raise IOError(2, 'Token file not found', token_path)
    return token_path
def format_error(error):
    """Render an exception as ``ExceptionType: message``."""
    type_name = type(error).__name__
    return '{0}: {1}'.format(type_name, str(error))
def get_error_dict(error, url = ''):
    """Build the failure ClassAd dictionary for a transfer attempt."""
    return {
        'TransferSuccess': False,
        'TransferError': format_error(error),
        'TransferUrl': url,
    }
class BoxPlugin:
    """File transfer backend for Box, wrapping the Box REST API (v2).

    An instance caches folder-path -> Box object-id lookups in
    ``self.path_ids``, so reusing one instance across several transfers
    avoids repeated API round trips. The access token is re-read from
    disk before every request because the credential monitor may refresh
    it at any time.
    """

    def __init__(self, token_path):
        # Path to the credential file (JSON with an 'access_token' key).
        self.token_path = token_path
        self.token = self.get_token(self.token_path)
        self.headers = {'Authorization': 'Bearer {0}'.format(self.token)}
        # Cache of resolved paths; Box's root folder always has id '0'.
        self.path_ids = {'/': u'0'}

    def get_token(self, token_path):
        """Read and return the OAuth access token stored in *token_path*."""
        with open(token_path, 'r') as f:
            access_token = json.load(f)['access_token']
        return access_token

    def reload_token(self):
        """Re-read the (possibly refreshed) token and update the auth header."""
        self.token = self.get_token(self.token_path)
        self.headers['Authorization'] = 'Bearer {0}'.format(self.token)

    def parse_url(self, url):
        """Split *url* into (filename, folder_tree).

        The URL's netloc is treated as the top-level folder; folder_tree
        is the list of folder names from the root down to the file's
        parent, and filename is the final path component.
        """
        # Build the folder tree
        parsed_url = urlparse(url)
        folder_tree = []
        if parsed_url.netloc != '':
            folder_tree.append(parsed_url.netloc)
        if parsed_url.path != '':
            path = posixpath.split(parsed_url.path)
            while path[1] != '':
                folder_tree.insert(1, path[1])
                path = posixpath.split(path[0])
        # The file is the last item in the tree
        filename = folder_tree.pop()
        return (filename, folder_tree)

    def api_call(self, endpoint, method = 'GET', params = None, data = {}):
        """Issue one Box API request and return the decoded JSON body.

        Raises requests.exceptions.HTTPError on non-2xx responses.
        (The mutable default for *data* is harmless here: it is only
        passed through to requests, never mutated.)
        """
        self.reload_token()
        url = BOX_API_BASE_URL + endpoint
        kwargs = {
            'headers': self.headers,
            'timeout': DEFAULT_TIMEOUT,
        }
        if params is not None:
            kwargs['params'] = params
        if method in ['POST', 'PUT', 'PATCH']:
            kwargs['data'] = data
        response = requests.request(method, url, **kwargs)
        response.raise_for_status()
        return response.json()

    def create_folder(self, folder_name, parent_id):
        """Create *folder_name* under folder *parent_id* and return its id.

        If the folder already exists (HTTP 409), the id of the existing
        conflicting folder is returned instead.
        """
        endpoint = '/folders'
        data = json.dumps({
            'name': folder_name,
            'parent': {'id': parent_id}
        })
        try:
            folder_info = self.api_call(endpoint, 'POST', data = data)
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 409: # folder already exists
                folder_info = e.response.json()['context_info']['conflicts'][0]
            else:
                raise e
        return folder_info['id']

    def get_object_id(self, object_name, object_type, object_parents):
        """Look up a file or folder id by name, type and ancestor chain.

        *object_parents* is the list of ancestor folder ids; the object
        is searched for inside the last of them, paging through listing
        results as needed. Raises IOError(ENOENT) when not found.
        """
        endpoint = '/folders/{0}/items'.format(object_parents[-1])
        fields = ['id', 'name', 'type', 'path_collection']
        limit = 1000
        params = {
            'fields': ','.join(fields),
            'limit': limit,
        }
        folder_items = self.api_call(endpoint, params = params)

        object_found = False
        while not object_found:
            for entry in folder_items['entries']:
                # First do a quick check against the name and type
                if (entry['name'] != object_name) or (entry['type'] != object_type):
                    continue
                # Then compare parents
                entry_parents = [p['id'] for p in entry['path_collection']['entries']]
                if set(entry_parents) == set(object_parents):
                    object_id = entry['id']
                    object_found = True
                    break
            # Go to next page if it exists and if haven't found object yet
            if object_found:
                pass
            elif (('next_marker' in folder_items) and
                  (folder_items['next_marker'] not in [None, ''])):
                params['next_marker'] = folder_items['next_marker']
                folder_items = self.api_call(endpoint, params = params)
            else:
                raise IOError(2, 'Object not found', object_name)
        return object_id

    def get_parent_folders_ids(self, folder_tree, create_if_missing = False):
        """Resolve every folder in *folder_tree* to its Box id.

        Returns the list of ids starting with the root ('0'). Missing
        folders are created when *create_if_missing* is set; otherwise
        IOError(ENOENT) is raised. Resolved paths are cached on self.
        """
        # Traverse the folder tree, starting at the root (id = 0)
        parent_ids = [u'0']
        searched_path = ''
        for folder_name in folder_tree:
            searched_path += '/{0}'.format(folder_name)
            if searched_path in self.path_ids: # Check the cached ids
                parent_id = self.path_ids[searched_path]
            else:
                try:
                    parent_id = self.get_object_id(folder_name, 'folder', parent_ids)
                except IOError:
                    if create_if_missing:
                        parent_id = self.create_folder(folder_name, parent_id = parent_ids[-1])
                    else:
                        raise IOError(2, 'Folder not found in Box', searched_path)
            self.path_ids[searched_path] = parent_id # Update the cached ids
            parent_ids.append(parent_id)
        return parent_ids

    def get_file_id(self, url):
        """Return the Box file id for *url*; raise IOError if not found."""
        # Parse out the filename and folder_tree and get folder_ids
        (filename, folder_tree) = self.parse_url(url)
        parent_ids = self.get_parent_folders_ids(folder_tree)
        try:
            file_id = self.get_object_id(filename, 'file', parent_ids)
        except IOError:
            raise IOError(2, 'File not found in Box', '{0}/{1}'.format('/'.join(folder_tree), filename))
        return file_id

    def download_file(self, url, local_file_path):
        """Download *url* from Box into *local_file_path*.

        Streams the body to disk in chunks and returns a dictionary of
        transfer statistics suitable for the output ClassAd.
        """
        start_time = time.time()

        file_id = self.get_file_id(url)
        endpoint = '/files/{0}/content'.format(file_id)
        download_url = BOX_API_BASE_URL + endpoint

        # Stream the data to disk, chunk by chunk,
        # instead of loading it all into memory.
        self.reload_token()
        connection_start_time = time.time()
        response = requests.get(download_url, headers = self.headers, stream = True,
                                timeout = DEFAULT_TIMEOUT)
        try:
            response.raise_for_status()
            try:
                content_length = int(response.headers['Content-Length'])
            except (ValueError, KeyError):
                content_length = False
            with open(local_file_path, 'wb') as f:
                file_size = 0
                for chunk in response.iter_content(chunk_size = 8192):
                    file_size += len(chunk)
                    f.write(chunk)
        except Exception as err:
            # Since we're streaming, we should
            # free the connection before raising the exception.
            response.close()
            raise err
        end_time = time.time()

        # Note that we *don't* include the TransferUrl:
        # 'TransferUrl': response.url.encode()
        # This would leak a short-lived URL into the transfer_history log
        # that anyone could use to access the requested content.
        # NOTE(review): urlparse(response.url.encode()) yields a *bytes*
        # netloc on Python 3, unlike the str used on the upload paths --
        # confirm whether TransferHostName is meant to be bytes here.
        transfer_stats = {
            'TransferSuccess': True,
            'TransferProtocol': 'https',
            'TransferType': 'download',
            'TransferFileName': local_file_path,
            'TransferFileBytes': file_size,
            'TransferTotalBytes': content_length or file_size,
            'TransferStartTime': int(start_time),
            'TransferEndTime': int(end_time),
            'ConnectionTimeSeconds': end_time - connection_start_time,
            'TransferHostName': urlparse(response.url.encode()).netloc,
            'TransferLocalMachineName': socket.gethostname(),
        }
        return transfer_stats

    def upload_file(self, url, local_file_path):
        """Upload *local_file_path* to *url*.

        Chooses the chunked upload API for files larger than
        DIRECT_UPLOAD_CUTOFF_MB and the direct upload API otherwise.
        """
        # Determine file upload method
        local_file_size_mb = float(os.stat(local_file_path).st_size) / 1e6
        if local_file_size_mb > DIRECT_UPLOAD_CUTOFF_MB:
            transfer_stats = self.upload_file_chunked(url, local_file_path)
        else:
            transfer_stats = self.upload_file_direct(url, local_file_path)
        return transfer_stats

    def upload_file_chunked(self, url, local_file_path):
        """Upload a large file via Box's chunked upload session API.

        Creates an upload session (new file or new version), uploads the
        file in session-defined parts with per-part SHA-1 digests and up
        to three tries per part, then commits the session. Returns the
        transfer statistics dictionary for the output ClassAd.
        """
        file_size = os.stat(local_file_path).st_size
        start_time = time.time()

        # Check if file exists
        file_id = None
        try:
            file_id = self.get_file_id(url)
        except IOError:
            (filename, folder_tree) = self.parse_url(url)
            parent_ids = self.get_parent_folders_ids(folder_tree, create_if_missing = True)
            parent_id = parent_ids[-1]

        if file_id is None:
            # Upload a new file
            data = {
                "folder_id": parent_id,
                "file_size": file_size,
                "file_name": filename,
            }
            session_url = 'https://upload.box.com/api/2.0/files/upload_sessions'
        else:
            # Upload a new version of the file
            data = {"file_size": file_size}
            session_url = 'https://upload.box.com/api/2.0/files/{0}/upload_sessions'.format(file_id)

        # initialize the session
        self.reload_token()
        headers = self.headers.copy()
        connection_start_time = time.time()
        response = requests.post(session_url, headers = headers, json = data)
        response.raise_for_status()
        session = response.json()

        # upload the file in parts defined by the session
        session_id = session['id']
        upload_url = session['session_endpoints']['upload_part']
        part_size = session['part_size']
        parts = []
        with open(local_file_path, 'rb') as f:
            # Whole-file SHA-1 is accumulated alongside the per-part digests;
            # Box requires it at commit time.
            file_sha1 = sha1()
            while True:
                part = f.read(part_size)
                if not part:
                    break
                file_sha1.update(part)
                part_sha1 = sha1(part)
                digest = "sha={0}".format(b64encode(part_sha1.digest()).decode('utf-8'))
                content_range = "bytes {0}-{1}/{2}".format(len(parts) * part_size, len(parts) * part_size + len(part) - 1, file_size)
                content_type = "application/octet-stream"
                for part_tries in range(3):
                    self.reload_token()
                    headers = self.headers.copy()
                    headers['Digest'] = digest
                    headers['Content-Range'] = content_range
                    headers['Content-Type'] = content_type
                    # retry each part up to three times
                    try:
                        response = requests.put(upload_url, headers = headers, data = part)
                        response.raise_for_status()
                    except Exception as err:
                        if part_tries >= 2:
                            raise err
                        else:
                            pass
                    else:
                        try:
                            parts.append(response.json()['part'])
                        except Exception as err:
                            if part_tries >= 2:
                                raise err
                            else:
                                pass
                        else:
                            break

        # commit the session
        commit_url = session['session_endpoints']['commit']
        digest = "sha={0}".format(b64encode(file_sha1.digest()).decode('utf-8'))
        data = {'parts': parts}
        self.reload_token()
        headers = self.headers.copy()
        headers['Digest'] = digest
        response = requests.post(commit_url, headers = headers, json = data)
        response.raise_for_status()
        # Trust the size Box reports for the committed file.
        file_size = int(response.json()['entries'][0]['size'])
        end_time = time.time()

        transfer_stats = {
            'TransferSuccess': True,
            'TransferProtocol': 'https',
            'TransferType': 'upload',
            'TransferFileName': local_file_path,
            'TransferFileBytes': file_size,
            'TransferTotalBytes': file_size,
            'TransferStartTime': int(start_time),
            'TransferEndTime': int(end_time),
            'ConnectionTimeSeconds': end_time - connection_start_time,
            'TransferHostName': urlparse(str(upload_url)).netloc,
            'TransferLocalMachineName': socket.gethostname(),
            'TransferUrl': 'https://upload.box.com/api/2.0/files/upload_sessions',
        }
        return transfer_stats

    def upload_file_direct(self, url, local_file_path):
        """Upload a small file in a single multipart POST.

        Creates a new file or a new version depending on whether the
        target already exists. Returns the transfer statistics
        dictionary for the output ClassAd.
        """
        start_time = time.time()

        # Check if file exists
        file_id = None
        try:
            file_id = self.get_file_id(url)
        except IOError:
            (filename, folder_tree) = self.parse_url(url)
            parent_ids = self.get_parent_folders_ids(folder_tree, create_if_missing = True)
            parent_id = parent_ids[-1]

        # Fix: the file handle was previously opened without ever being
        # closed; a context manager guarantees it is released.
        with open(local_file_path, 'rb') as upload_fp:
            files = {
                'file': upload_fp,
            }
            if file_id is None:
                # Upload a new file
                data = {
                    'attributes': json.dumps({
                        'name': filename,
                        'parent': {'id': parent_id},
                    }),
                }
                upload_url = 'https://upload.box.com/api/2.0/files/content'
                self.reload_token()
                connection_start_time = time.time()
                response = requests.post(upload_url, headers = self.headers, data = data, files = files)
            else:
                # Upload a new version of the file
                upload_url = 'https://upload.box.com/api/2.0/files/{0}/content'.format(file_id)
                self.reload_token()
                connection_start_time = time.time()
                response = requests.post(upload_url, headers = self.headers, files = files)
        response.raise_for_status()

        try:
            content_length = int(response.request.headers['Content-Length'])
        except (ValueError, KeyError):
            content_length = False
        file_size = int(response.json()['entries'][0]['size'])
        end_time = time.time()

        transfer_stats = {
            'TransferSuccess': True,
            'TransferProtocol': 'https',
            'TransferType': 'upload',
            'TransferFileName': local_file_path,
            'TransferFileBytes': file_size,
            'TransferTotalBytes': content_length or file_size,
            'TransferStartTime': int(start_time),
            'TransferEndTime': int(end_time),
            'ConnectionTimeSeconds': end_time - connection_start_time,
            'TransferHostName': urlparse(str(upload_url)).netloc,
            'TransferLocalMachineName': socket.gethostname(),
            'TransferUrl': 'https://upload.box.com/api/2.0/files/content',
        }
        return transfer_stats
if __name__ == '__main__':
    # Per the design doc, all failures should result in exit code 1.
    # This is true even if we cannot write a ClassAd to the outfile,
    # so we catch all exceptions, try to write to the outfile if we can
    # and always exit 1 on error.
    #
    # Exiting 1 without an outfile thus means one of two things:
    # 1. Couldn't parse arguments.
    # 2. Couldn't open outfile for writing.

    # Drop any inherited HTTPS proxy so transfers go directly to Box.
    try:
        del os.environ['HTTPS_PROXY']
    except Exception:
        pass

    try:
        args = parse_args()
    except Exception:
        sys.exit(1)

    # Parse the input ClassAds describing the requested transfers; on
    # failure, report the error through the outfile if at all possible.
    try:
        infile_ads = classad.parseAds(open(args['infile'], 'r'))
    except Exception as err:
        try:
            with open(args['outfile'], 'w') as outfile:
                outfile_dict = get_error_dict(err)
                outfile.write(str(classad.ClassAd(outfile_dict)))
        except Exception:
            pass
        sys.exit(1)

    # Process each requested transfer, writing one result ClassAd per ad.
    try:
        running_plugins = {}
        with open(args['outfile'], 'w') as outfile:
            for ad in infile_ads:
                tries = 0
                try:
                    token_name = get_token_name(ad['Url'])
                    token_path = get_token_path(token_name)

                    # Use existing plugin objects if possible because they have
                    # cached object ids, which make path lookups much faster in
                    # the case of multiple file downloads/uploads.
                    if token_path in running_plugins:
                        box = running_plugins[token_path]
                    else:
                        box = BoxPlugin(token_path)
                        running_plugins[token_path] = box

                    # Each individual transfer gets up to three tries.
                    while tries < 3:
                        tries += 1
                        try:
                            if not args['upload']:
                                outfile_dict = box.download_file(ad['Url'], ad['LocalFileName'])
                            else:
                                outfile_dict = box.upload_file(ad['Url'], ad['LocalFileName'])
                        except IOError as err:
                            # Retry on socket closed unexpectedly (errno 32, EPIPE)
                            if (err.errno == 32) and (tries < 3):
                                pass
                            else:
                                raise err
                        else:
                            break

                    outfile.write(str(classad.ClassAd(outfile_dict)))

                except Exception as err:
                    try:
                        outfile_dict = get_error_dict(err, url = ad['Url'])
                        outfile.write(str(classad.ClassAd(outfile_dict)))
                    except Exception:
                        pass
                    # Ask condor_starter to retry on 401 (e.g. expired token).
                    if (isinstance(err, requests.exceptions.HTTPError)
                            and err.response.status_code == 401):
                        sys.exit(1)
                    else:
                        sys.exit(1)

    except Exception:
        sys.exit(1)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from horizon import tabs
from horizon import tables
from openstack_dashboard.api import keystone
from openstack_dashboard.api import network
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.project.connections.\
top_talkers.tables import TopTalkersTable
from openstack_dashboard.dashboards.project.connections.\
reachability_tests.tables import ReachabilityTestsTable
from openstack_dashboard.dashboards.project.connections.mockapi import NetworkTemplateAPI
from openstack_dashboard.dashboards.project.connections.reachability_tests.reachability_test_api import ReachabilityTestAPI
from openstack_dashboard.dashboards.project.connections.reachability_tests.reachability_test_db import \
ReachabilityTest, ReachabilityTestResult, ReachabilityQuickTest, ReachabilityQuickTestResult
import openstack_dashboard.dashboards.project.connections.bsn_api as bsn_api
from openstack_dashboard.dashboards.project.connections.network_template \
import network_template_api
LOG = logging.getLogger(__name__)
class DeleteTemplateAction(tables.DeleteAction):
    """Admin table action that deletes a stored network template."""
    # Labels Horizon uses to build the confirmation dialog text.
    data_type_singular = _("Network Template")
    data_type_plural = _("Network Templates")

    def delete(self, request, obj_id):
        """Delete the template with id *obj_id*.

        Failures (e.g. the template is still assigned to a tenant) are
        reported to the user as an error message rather than re-raised.
        """
        try:
            network_template_api.delete_template_by_id(obj_id)
        except Exception as e:
            LOG.info(str(e))
            messages.error(
                request, _("Unable to delete template. Template may "
                           "be in use by a tenant."))
class CreateTemplateAction(tables.LinkAction):
    """Table action linking to the admin "create template" modal form."""
    name = "create"
    verbose_name = _("Create Network Template")
    url = "horizon:admin:connections:network_template_admin:create"
    classes = ("ajax-modal", "btn-create")
class RemoveTemplateAction(tables.LinkAction):
    """Table action that removes the tenant's network template instance."""
    name = "remove"
    url = "horizon:project:connections:network_template:remove"
    classes = ("ajax-modal", "btn-danger")
    verbose_name = _("Remove Network Template Instance")

    def allowed(self, request, datum):
        """Only offer removal when the tenant has a template assigned."""
        tid = request.user.tenant_id
        # bool() replaces the redundant "True if ... else False" idiom.
        return bool(network_template_api.get_tenant_stack_assignment(tid))
class ApplyTemplateAction(tables.LinkAction):
    """Table action linking to the "apply network template" modal form."""
    name = "apply"
    verbose_name = _("Apply Network Template")
    url = "horizon:project:connections:network_template:select"
    classes = ("ajax-modal", "btn-create")

    def allowed(self, request, datum):
        """Only offer applying a template when none is assigned yet."""
        tid = request.user.tenant_id
        # "not" replaces the inverted "False if ... else True" idiom.
        return not network_template_api.get_tenant_stack_assignment(tid)
class NetworkTemplateTable(tables.DataTable):
    """Single-row table describing the tenant's currently applied template."""
    template_name = tables.Column("template_name",
                                  verbose_name=_("Template Name"))
    heat_stack_name = tables.Column("heat_stack_name",
                                    verbose_name=_("Heat Stack Name"))
    description = tables.Column("description", verbose_name=_("Description"))
    status = tables.Column("status", verbose_name=_("Status"))
    resources = tables.Column("resources", verbose_name=_("Resources"))

    def get_object_id(self, stack):
        # Rows carry no stable identifier: the table holds at most one
        # row and defines no row-level actions, so no id is needed.
        return None

    class Meta:
        multi_select = False
        name = "networktemplate"
        verbose_name = _("Network Template")
        table_actions = (ApplyTemplateAction, RemoveTemplateAction)
        row_actions = tuple()
class NetworkTemplateAdminTable(tables.DataTable):
    """Admin table listing all stored network templates."""
    template_id = tables.Column("id",
                                verbose_name=_("Template ID"))
    template_name = tables.Column(
        "template_name",
        link=("horizon:admin:connections:network_template_admin:detail"),
        verbose_name=_("Template Name"))

    def get_object_id(self, template):
        # Templates are addressed by their database id.
        return template.id

    class Meta:
        name = "networktemplate_admin"
        verbose_name = _("Network Template Administration")
        table_actions = (CreateTemplateAction, DeleteTemplateAction)
        row_actions = (DeleteTemplateAction,)
class NetworkTemplateAdminTab(tabs.TableTab):
    """Admin-only tab showing the network template administration table."""
    table_classes = (NetworkTemplateAdminTable,)
    name = _("Network Template Admin")
    slug = "network_template_tab_admin"
    template_name = "horizon/common/_detail_table.html"
    # TODO(kevinbenton): delete this file if not needed
    # template_name = "project/connections/network_template/_template_adminhome.html"

    def allowed(self, request):
        """Only show this tab under the /admin/ dashboard."""
        if not self.request.path_info.startswith('/admin/'):
            return False
        return super(NetworkTemplateAdminTab, self).allowed(request)

    def get_networktemplate_admin_data(self):
        """Rows for the admin table: every stored network template."""
        return network_template_api.get_network_templates()
class NetworkTemplateTab(tabs.TableTab):
    """Project-facing tab describing the tenant's applied network template."""
    table_classes = (NetworkTemplateTable,)
    name = _("Network Template")
    slug = "network_template_tab"
    template_name = "horizon/common/_detail_table.html"

    def allowed(self, request):
        # don't show tab to tenants if heat isn't installed
        if not network_template_api.is_heat_available(request):
            return False
        # don't show the regular template tab to admins
        return (not request.path_info.startswith('/admin/')
                and super(NetworkTemplateTab, self).allowed(request))

    def get_networktemplate_data(self):
        """Build the single table row from the tenant's stack topology.

        Returns an empty list when no template is assigned.
        """
        topology = network_template_api.get_stack_topology(self.request)
        if not topology.get('assign'):
            return []
        # NOTE(review): the replace() below appears to substitute a
        # non-breaking space for the regular space before the joined
        # HTML is marked safe (possibly originally '&nbsp;') -- confirm
        # the intended replacement character against version control.
        tabledata = {
            'template_name': topology['assign'].template.template_name,
            'heat_stack_name': topology['stack'].stack_name,
            'description': topology['stack'].description,
            'status': topology['stack'].stack_status_reason,
            'resources': mark_safe('<br>'.join([
                ('%s (%s)' % (r.resource_name,
                              r.resource_type)).replace(' ', ' ')
                for r in topology['stack_resources']]))
        }
        return [tabledata]
class ReachabilityTestsTab(tabs.TableTab):
    """Tab listing the current project's reachability tests."""
    table_classes = (ReachabilityTestsTable,)
    name = _("Reachability Tests")
    slug = "reachability_test_tab"
    template_name = "horizon/common/_detail_table.html"

    def get_reachability_tests_data(self):
        """Fetch this project's reachability tests inside one transaction."""
        api = ReachabilityTestAPI()
        with bsn_api.Session.begin(subtransactions=True):
            tests = api.listReachabilityTests(
                self.request.user.project_id, bsn_api.Session)
        return tests
class TopTalkersTab(tabs.TableTab):
    """Placeholder tab for the (not yet implemented) Top Talkers table."""
    table_classes = (TopTalkersTable,)
    name = _("Top Talkers")
    slug = "top_talkers_tab"
    template_name = "horizon/common/_detail_table.html"

    def get_toptalkers_data(self):
        # TODO(kevinbenton): Add an API call to get the data
        # to display for Top Talkers table.
        return []
class ConnectionsTabs(tabs.TabGroup):
    """Tab group for the Connections panel (project and admin views)."""
    slug = "connections_tabs"
    # TODO(kevinbenton): re-enabled top talkers once implemented
    # tabs = (NetworkTemplateTab, ReachabilityTestsTab, TopTalkersTab)
    # Keep the user's selected tab across page loads.
    sticky = True
    tabs = (ReachabilityTestsTab, NetworkTemplateTab, NetworkTemplateAdminTab)
class CreateNetworkTemplate(forms.SelfHandlingForm):
    """Form for creating a new network template or updating an existing one."""
    name = forms.CharField(max_length=255, label=_("Name"), required=True)
    body = forms.CharField(
        widget=forms.Textarea(attrs={'rows': 20}),
        max_length=None, label=_("Template Body"), required=True)
    # Populated when editing an existing template (see DetailNetworkTemplate).
    existing_id = forms.CharField(widget=forms.HiddenInput(), required=False)
    template_name = "horizon/common/_detail_table.html"

    def handle(self, request, data):
        """Create or update the template; return True on success."""
        template = network_template_api.get_template_by_id(data['existing_id'])
        try:
            if template:
                network_template_api.update_template_by_id(
                    data['existing_id'], data['name'], data['body'])
            else:
                network_template_api.create_network_template(
                    data['name'], data['body'])
        # Was a bare "except:", which would also swallow SystemExit and
        # KeyboardInterrupt; only operational failures should be reported
        # as a form error.
        except Exception:
            messages.error(
                request, _("Unable to create template. "
                           "Verify that the name is unique."))
            return False
        messages.success(request, _("Template saved."))
        return True
class DetailNetworkTemplate(CreateNetworkTemplate):
    """Edit form pre-populated from the template named in the request path."""

    def __init__(self, request, *args, **kwargs):
        # The template id is the final component of the request path.
        template_id = request.path_info.split('/')[-1]
        template = network_template_api.get_template_by_id(template_id)
        super(DetailNetworkTemplate, self).__init__(request, *args, **kwargs)
        if not template:
            return
        self.fields['existing_id'].initial = template_id
        self.fields['name'].initial = template.template_name
        self.fields['body'].initial = template.body
|
|
import os
from math import sqrt
import numpy as np
import scipy.optimize
from scipy.stats import chi2
import matplotlib
matplotlib.use('Agg')
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern'], 'size': 15})
matplotlib.rc('text', usetex=True)
import matplotlib.pyplot as plt
from enrico import utils
from enrico import plotting
from enrico import environ
from enrico import lightcurve
from enrico.config import get_config
from enrico.constants import LightcurvePath,FoldedLCPath
from enrico.submit import call
from enrico.RunGTlike import run, GenAnalysisObjects
from enrico import Loggin
from enrico.plotting import plot_bayesianblocks
def pol0(x, p1):
    """Constant polynomial: return p1 (x is ignored)."""
    return p1


def pol1(x, p1, p2):
    """First-order polynomial p1 + p2 * x."""
    return p1 + p2 * x
class BayesianBlocks(lightcurve.LightCurve):
    """Class to calculate light curves and variability indexes."""
    def __init__(self, config, parent_filename=""):
        """Prepare a Bayesian-blocks light-curve analysis.

        :param config: enrico configuration object.
        :param parent_filename: config file of the parent analysis.
        """
        super(BayesianBlocks, self).__init__(config, parent_filename)
        self.LCfolder = self.folder+"/BayesianBlocks/"
        utils.mkdir_p(self.LCfolder)
        # Convert time if necessary
        if self.config['time']['type']=='MJD':
            self.config['time']['tmin'] = utils.MJD_to_met(self.config['time']['tmin'])
            self.config['time']['tmax'] = utils.MJD_to_met(self.config['time']['tmax'])
            # BUG FIX: this was ``==`` (a no-op comparison), so the config
            # kept claiming MJD after the values were converted to MET.
            self.config['time']['type'] = 'MET'
        elif self.config['time']['type']=='JD':
            self.config['time']['tmin'] = utils.JD_to_met(self.config['time']['tmin'])
            self.config['time']['tmax'] = utils.JD_to_met(self.config['time']['tmax'])
            # BUG FIX: same ``==`` vs ``=`` typo as above.
            self.config['time']['type'] = 'MET'
        self.tmin = self.config['time']['tmin']
        self.tmax = self.config['time']['tmax']
        self.p0 = self.config['BayesianBlocks']['p0']
        # Propagate the BayesianBlocks options to the Spectrum/UpperLimit
        # sections used by the per-bin fits.
        self.config['Spectrum']['FitsGeneration'] = self.config['BayesianBlocks']['FitsGeneration']
        self.config['Spectrum']['FrozenSpectralIndex'] = self.config['BayesianBlocks']['SpectralIndex']
        self.config['UpperLimit']['TSlimit'] = self.config['BayesianBlocks']['TSLightCurve']
        # Check the aperture light curve has been run first
        self._CheckAppertureLightCurveFile()
    def _CheckAppertureLightCurveFile(self):
        ''' Check the existance of apperture light curve file (the selected evt list with good gti)'''
        import os.path
        # The three products written by enrico_applc; all must exist.
        evtfile = str("%s/AppertureLightCurve/%s_%s_MkTime.fits"%(self.folder,self.srcname,self.Tag))
        expfile = str("%s/AppertureLightCurve/%s_%s_applc.fits"%(self.folder,self.srcname,self.Tag))
        apfile = str("%s/AppertureLightCurve/TimeExposureCount.txt"%(self.folder))
        if not os.path.isfile(evtfile) or not os.path.isfile(expfile) or not os.path.isfile(apfile):
            raise Exception('The apperture photometry events list doesn\'t exist\nPlease run enrico_applc first')
    def _MakeTimeBins(self):
        """Compute adaptive time bins with astropy's Bayesian-blocks algorithm
        on the exposure-corrected aperture-photometry event list, store them
        in ``self.time_array`` and plot the aperture-photometry flux."""
        from astropy.stats import bayesian_blocks
        from astropy.table import Table
        evtfile = str("%s/AppertureLightCurve/%s_%s_MkTime.fits"%(self.folder,self.srcname,self.Tag))
        evtlist = Table.read(evtfile, hdu='EVENTS')['TIME'].data
        expfile = str("%s/AppertureLightCurve/%s_%s_applc.fits"%(self.folder,self.srcname,self.Tag))
        expbins = Table.read(expfile, hdu='RATE')
        meanRate = float(len(evtlist))/float(self.tmax-self.tmin)
        print(("Mean photon rate %s s^-1" %meanRate))
        print(("Mean photon rate %s day^-1" %(meanRate*3600.*24)))
        #Sort table in function of time just to be sure
        evtlist.sort()
        # NOTE(review): evtlistExpCorrected is never used below — confirm.
        evtlistExpCorrected = np.empty_like(evtlist)
        # NOTE(review): the result of this argsort indexing is discarded;
        # presumably ``expbins`` was meant to be reassigned — confirm.
        expbins[expbins.argsort('TIME')]
        # Calculate relative exposure time and time correction associated for each exposure bins
        j=0
        surfaceFermi = 10000 # in cm^2
        # timeCorrection maps true time (col 0) -> exposure-corrected time
        # (col 1) at every exposure-bin edge.
        timeCorrection = np.zeros((len(expbins)+1,2))
        exposure = np.zeros(len(expbins))
        timeCorrection[j, 0] = expbins['TIME'][j]-0.5*expbins['TIMEDEL'][j]
        timeCorrection[j, 1] = 0.
        exposure[j] = expbins['EXPOSURE'][j]/(expbins['TIMEDEL'][j]*surfaceFermi)
        for j in range(1, len(expbins)):
            exposure[j] = expbins['EXPOSURE'][j]/(expbins['TIMEDEL'][j]*surfaceFermi)
            timeCorrection[j, 0] = expbins['TIME'][j]-0.5*expbins['TIMEDEL'][j]
            timeCorrection[j, 1] = timeCorrection[j-1, 1]+exposure[j-1]*expbins['TIMEDEL'][j-1]
            timeCorrection[j+1, 0] = expbins['TIME'][j]+0.5*expbins['TIMEDEL'][j]
            timeCorrection[j+1, 1] = timeCorrection[j, 1]+exposure[j]*expbins['TIMEDEL'][j]
        #Apply exposure time correction
        evtlistcorrected = np.interp(evtlist, timeCorrection[:, 0], timeCorrection[:, 1])
        meanRateCorrected = float(len(evtlistcorrected))/float(timeCorrection[-1, 1]-timeCorrection[0, 1])
        print(("Mean photon rate exposure corrected %s s^-1" %meanRateCorrected))
        print(("Mean photon rate exposure corrected %s day^-1" %(meanRateCorrected*3600.*24)))
        #Calculate bayesian block
        edgesCorrected = bayesian_blocks(evtlistcorrected, fitness='events', p0=self.p0)
        # Pin the first/last edges to the full corrected time range.
        edgesCorrected[0] = timeCorrection[0, 1]
        edgesCorrected[-1] = timeCorrection[-1, 1]
        #Calculate bin event for apperture photometry
        count, tmp = np.histogram(evtlistcorrected, bins=edgesCorrected)
        errcount = np.sqrt(count)
        #Correct edges from exposure
        edges = np.interp(edgesCorrected, timeCorrection[:, 1], timeCorrection[:, 0])
        edges[0] = self.tmin
        edges[-1] = self.tmax
        #Calculate apperture phtometry flux
        flux = np.array(count/(edgesCorrected[1:]-edgesCorrected[:-1]))
        errflux = np.array(errcount/(edgesCorrected[1:]-edgesCorrected[:-1]))
        self.Nbin = len(edges)-1
        # time_array stores [start0, stop0, start1, stop1, ...] in MET.
        self.time_array = np.zeros(self.Nbin*2)
        self.gtifile = []
        for i in range(self.Nbin):
            self.time_array[2*i] = edges[i]
            self.time_array[2*i+1]= edges[i+1]
        self.info("Running LC with "+str(self.Nbin)+" bins")
        for i in range(self.Nbin):
            print(("Bin ",i," Start=",self.time_array[2*i]," Stop=",self.time_array[2*i+1], 'Apperture Photometry=', flux[i], '+/-', errflux[i], 'ph.s^-1'))
        #Dump into ascii
        bbfile = str("%s/BayesianBlocks/%s_bb.dat"%(self.folder,self.srcname))
        np.savetxt(bbfile, np.transpose(np.array([np.array(edges[:-1]), np.array(edges[1:]), np.array(edgesCorrected[1:]-edgesCorrected[:-1]), np.array(count)])),
                   header='tstart tend dt_exposure_corrected count')
        #Load apperture flux point
        time_pt, dTime_pt, flux_pt, errflux_pt = self.readApperturePhotometryPoint()
        plt.figure()
        plt.xlabel(r"Time (s)")
        plt.ylabel(r"${\rm Flux\ (photon\ cm^{-2}\ s^{-1})}$")
        # NOTE(review): np.bool is removed in NumPy >= 1.24; plain ``bool``
        # would be needed there.
        plot_bayesianblocks(np.array(edges[:-1]), np.array(edges[1:]),
                            flux/surfaceFermi, errflux/surfaceFermi, errflux/surfaceFermi,
                            np.zeros(flux.shape).astype(np.bool))
        plt.errorbar(time_pt, flux_pt/surfaceFermi, yerr=errflux_pt/surfaceFermi, xerr=dTime_pt/2., color='k', ls='None')
        # Clamp the axes to the 1st/99th percentile of the flux values.
        plt.ylim(ymin=max(plt.ylim()[0],np.percentile(flux/surfaceFermi,1)*0.1),
                 ymax=min(plt.ylim()[1],np.percentile(flux/surfaceFermi,99)*2.0))
        plt.xlim(xmin=max(plt.xlim()[0],1.02*min(np.array(edges[:-1]))-0.02*max(np.array(edges[1:]))),
                 xmax=min(plt.xlim()[1],1.02*max(np.array(edges[1:]))-0.02*min(np.array(edges[:-1]))))
        # Move the offset to the axis label
        ax = plt.gca()
        ax.get_yaxis().get_major_formatter().set_useOffset(False)
        offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
        if (offset_factor != 0):
            ax.set_yticklabels([float(round(k,5)) \
                for k in ax.get_yticks()*10**(-offset_factor)])
            ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\
                r" [${\times 10^{%d}}$]" %offset_factor)
        # Secondary axis with MJD
        mjdaxis = ax.twiny()
        mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
        mjdaxis.set_xlabel(r"Time (MJD)")
        mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
        plt.setp(mjdaxis.xaxis.get_majorticklabels(), rotation=15)
        plt.tight_layout()
        LcOutPath = self.LCfolder + self.config['target']['name']
        plt.savefig(LcOutPath+"_AP.png", dpi=150, facecolor='w', edgecolor='w',
                    orientation='portrait', papertype=None, format=None,
                    transparent=False, bbox_inches=None, pad_inches=0.1,
                    frameon=None)
    def _ManageFolder(self,path):
        """ All files will be stored in a subfolder name path + NLCbin
        Create a subfolder"""
        # NOTE(review): ``path`` is currently unused; output always goes to
        # the BayesianBlocks subfolder created in __init__.
        self.config['out'] = self.LCfolder
    def _MakeLC(self,Path=LightcurvePath) :
        #import gc
        import os
        #gc.enable()
        '''Main function of the Lightcurve script. Read the config file and run the gtlike analysis'''
        enricodir = environ.DIRS.get('ENRICO_DIR')
        fermidir = environ.DIRS.get('FERMI_DIR')
        self.PrepareLC(self.config['BayesianBlocks']['MakeConfFile'])#Get the config file
        for i in range(self.Nbin):
            #gc.collect()
            # Fit this time bin, then refresh the Bayesian-blocks plots.
            cmd = str("enrico_sed %s && enrico_plot_bayesianblocks %s" %(self.configfile[i], self.parent_filename))
            if self.submit == 'yes':
                # Submit one batch job per time bin.
                scriptname = self.LCfolder+"LC_Script_"+str(i)+".sh"
                JobLog = self.LCfolder+"LC_Job_"+str(i)+".log"
                JobName = (self.config['target']['name'] + "_" +
                           self.config['analysis']['likelihood'] +
                           "_LC_" + self.config['file']['tag'])+"_"+str(i)+".log"
                call(cmd,enricodir,fermidir,scriptname,JobLog,JobName)#Submit the job
            else :
                os.system(cmd)
                #run(self.configfile[i])#run in command line
def _PlotLC(self,folded=False):
self.info("Reading files produced by enrico")
LcOutPath = self.LCfolder + self.config['target']['name']
#Result are stored into list. This allow to get rid of the bin which failled
Time = []
TimeErr = []
Flux = []
FluxErr = []
# FluxErrChi2 = []
Index = []
IndexErr = []
Cutoff = []
CutoffErr = []
FluxForNpred = []
# FluxErrForNpred = []
Npred = []
Npred_detected_indices = []
TS = []
uplim = []
# Find name used for index parameter
if ((self.config['target']['spectrum'] == 'PowerLaw' or
self.config['target']['spectrum'] == 'PowerLaw2') and
self.config['target']['redshift'] == 0):
IndexName = 'Index'
CutoffName = None
elif (self.config['target']['spectrum'] == 'PLExpCutoff' or
self.config['target']['spectrum'] == 'PLSuperExpCutoff'):
IndexName = 'Index1'
CutoffName = 'Cutoff'
CutoffErrName = 'dCutoff'
else:
IndexName = 'alpha'
CutoffName = None
IndexErrName = 'd' + IndexName
Nfail = 0
for i in range(self.Nbin):
CurConfig = get_config(self.configfile[i])
#Read the result. If it fails, it means that the bins has not bin computed. A warning message is printed
try :
ResultDic = utils.ReadResult(CurConfig)
if ResultDic == {}:
raise(ValueError)
except :
self._errorReading("Fail reading config file",i)
Nfail+=1
continue
#Update the time and time error array
Time.append((ResultDic.get("tmax")+ResultDic.get("tmin"))/2.)
TimeErr.append((ResultDic.get("tmax")-ResultDic.get("tmin"))/2.)
#Check is an ul have been computed. The error is set to zero for the TGraph.
if 'Ulvalue' in ResultDic :
uplim.append(1)
Flux.append(ResultDic.get("Ulvalue"))
# FluxErr.append(0)
# FluxErrChi2.append(ResultDic.get("dFlux"))
# Index.append(ResultDic.get(IndexName))
# IndexErr.append(0)
else :
uplim.append(0)
Flux.append(ResultDic.get("Flux"))
FluxErr.append(ResultDic.get("dFlux"))
# FluxErrChi2.append(ResultDic.get("dFlux"))
Index.append(ResultDic.get(IndexName))
IndexErr.append(ResultDic.get(IndexErrName))
# if CutoffName is not None:
# Cutoff.append(ResultDic.get(CutoffName))
# CutoffErr.append(ResultDic.get(CutoffErrName))
# FluxErrForNpred.append(ResultDic.get("dFlux"))
FluxForNpred.append(ResultDic.get("Flux"))
#Get the Npred and TS values
Npred.append(ResultDic.get("Npred"))
TS.append(ResultDic.get("TS"))
if (CurConfig['BayesianBlocks']['TSLightCurve']<float(ResultDic.get("TS"))):
Npred_detected_indices.append(i-Nfail)
# #change the list into np array
# TS = np.array(TS)
Npred = np.asarray(Npred)
Npred_detected = np.asarray(Npred[Npred_detected_indices])
Time = np.asarray(Time)
TimeErr = np.asarray(TimeErr)
Flux = np.asarray(Flux)
FluxErr = np.asarray(FluxErr)
# Index = np.array(Index)
# IndexErr = np.array(IndexErr)
# Cutoff = np.array(Cutoff)
# CutoffErr = np.array(CutoffErr)
FluxForNpred = np.asarray(FluxForNpred)
# FluxErrForNpred = np.array(FluxErrForNpred)
uplim = np.asarray(uplim,dtype=bool)
#Plots the diagnostic plots is asked
# Plots are : Npred vs flux
# TS vs Time
if self.config['BayesianBlocks']['DiagnosticPlots'] == 'yes' and len(Npred)>0:
#plot Npred vs flux
plt.figure()
NdN = np.asarray(Npred) /np.sqrt(Npred)
FdF = np.asarray(FluxForNpred) / (np.asarray(FluxErr) + 1e-20)
plt.errorbar(NdN, FdF,fmt='+',color='black')
if len(Npred_detected)>2:
NdN = np.asarray(Npred_detected) /np.sqrt(Npred_detected)
FdF = np.asarray(FluxForNpred[Npred_detected_indices]) / (np.asarray(FluxErr[Npred_detected_indices]) + 1e-20)
plt.errorbar(NdN, FdF,fmt='+',color='red')
popt,_ = scipy.optimize.curve_fit(pol1, NdN, FdF, p0=[0,1])#, sigma=dydata)
for i in range(len(FluxForNpred)):
if FluxForNpred[i]/FluxErr[i]>2*pol1(sqrt(Npred[i]),popt[0],popt[1]):
self._errorReading("problem in errors calculation for",i)
print(("Flux +/- error = ",FluxForNpred[i]," +/- ",FluxErr[i]))
print(("V(Npred) = ",sqrt(Npred[i])))
print()
plt.plot(np.array([0,max(NdN)]),pol1(np.array([0,max(NdN)]),popt[0],popt[1]),'--',color='black')
plt.xlabel(r"${\rm Npred/\sqrt{Npred}}$")
plt.ylabel(r"${\rm Flux/\Delta Flux}$")
plt.savefig(LcOutPath+"_Npred.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
else :
print("No Npred Plot produced")
#plot TS vs Time
plt.figure()
plt.xlabel(r"Time (s)")
plt.ylabel(r"Test Statistic")
plt.errorbar(x=Time,y=TS,xerr=TimeErr,fmt='+',color='black',ls='None')
plt.ylim(ymin=min(TS)*0.8,ymax=max(TS)*1.2)
plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time)-0.02*max(Time)),xmax=min(plt.xlim()[1],1.02*max(Time)-0.02*min(Time)))
# Move the offset to the axis label
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
if (offset_factor != 0):
ax.set_yticklabels([float(round(k,5)) for k in ax.get_yticks()*10**(-offset_factor)])
ax.yaxis.set_label_text(ax.yaxis.get_label_text() + r" [${\times 10^{%d}}$]" %offset_factor)
# Secondary axis with MJD
mjdaxis = ax.twiny()
mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
mjdaxis.set_xlabel(r"Time (MJD)")
mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 )
plt.tight_layout()
plt.savefig(LcOutPath+"_TS.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
if len(Time) > 0:
plt.figure()
plt.xlabel(r"Time (s)")
plt.ylabel(r"${\rm Flux\ (photon\ cm^{-2}\ s^{-1})}$")
plot_bayesianblocks(Time-TimeErr, Time+TimeErr, Flux, FluxErr, FluxErr, uplim)
plt.ylim(ymin=max(plt.ylim()[0],np.percentile(Flux[~uplim],1)*0.1),
ymax=min(plt.ylim()[1],np.percentile(Flux[~uplim],99)*2.0))
plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time-TimeErr)-0.02*max(Time+TimeErr)),
xmax=min(plt.xlim()[1],1.02*max(Time+TimeErr)-0.02*min(Time-TimeErr)))
# Move the offset to the axis label
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
if (offset_factor != 0):
ax.set_yticklabels([float(round(k,5)) \
for k in ax.get_yticks()*10**(-offset_factor)])
ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\
r" [${\times 10^{%d}}$]" %offset_factor)
# Secondary axis with MJD
mjdaxis = ax.twiny()
mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
mjdaxis.set_xlabel(r"Time (MJD)")
mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 )
plt.tight_layout()
plt.savefig(LcOutPath+"_LC.png", dpi=150, facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
else:
print("[BayesianBlocks] Warning : No valid data")
if self.config["BayesianBlocks"]["SpectralIndex"] == 0 :
if len(Time[~uplimIndex]) > 0:
plt.figure()
plt.xlabel(r"Time (s)")
plt.ylabel(r"${\rm Index}$")
Index = np.asarray(Index)
IndexErr = np.asarray(IndexErr)
uplimIndex = uplim #+ Index<0.55
plot_bayesianblocks(Time[~uplimIndex]-TimeErr[~uplimIndex],
Time[~uplimIndex]+TimeErr[~uplimIndex],
Index[~uplimIndex],
IndexErr[~uplimIndex],
IndexErr[~uplimIndex],
uplimIndex[~uplimIndex])
plt.ylim(ymin=max(plt.ylim()[0],np.percentile(Index[~uplimIndex],1)*0.1),
ymax=min(plt.ylim()[1],np.percentile(Index[~uplimIndex],99)*2.0))
plt.xlim(xmin=max(plt.xlim()[0],1.02*min(Time-TimeErr)-0.02*max(Time+TimeErr)),
xmax=min(plt.xlim()[1],1.02*max(Time+TimeErr)-0.02*min(Time-TimeErr)))
# Move the offset to the axis label
ax = plt.gca()
ax.get_yaxis().get_major_formatter().set_useOffset(False)
offset_factor = int(np.mean(np.log10(np.abs(ax.get_ylim()))))
if (offset_factor != 0):
ax.set_yticklabels([float(round(k,5)) \
for k in ax.get_yticks()*10**(-offset_factor)])
ax.yaxis.set_label_text(ax.yaxis.get_label_text() +\
r" [${\times 10^{%d}}$]" %offset_factor)
# Secondary axis with MJD
mjdaxis = ax.twiny()
mjdaxis.set_xlim([utils.met_to_MJD(k) for k in ax.get_xlim()])
mjdaxis.set_xlabel(r"Time (MJD)")
mjdaxis.xaxis.set_major_formatter(matplotlib.ticker.ScalarFormatter(useOffset=False))
plt.setp( mjdaxis.xaxis.get_majorticklabels(), rotation=15 )
plt.tight_layout()
plt.savefig(LcOutPath+"_Index.png", dpi=150,
facecolor='w', edgecolor='w',
orientation='portrait', papertype=None, format=None,
transparent=False, bbox_inches=None, pad_inches=0.1,
frameon=None)
else:
print("[BayesianBlocks] Warning : No valid data")
#Dump into ascii
lcfilename = LcOutPath+"_results.dat"
self.info("Write to Ascii file : "+lcfilename)
lightcurve.WriteToAscii(Time,TimeErr,Flux,FluxErr,Index,IndexErr,
Cutoff,CutoffErr,TS,Npred,lcfilename)
    def readApperturePhotometryPoint(self):
        """Read and rebin the aperture-photometry points; return
        (time, dTime, flux, errflux) with times converted from MJD to MET."""
        apfile = str("%s/AppertureLightCurve/TimeExposureCount.txt"%(self.folder))
        time, dTime, exposure, counts = np.loadtxt(apfile, skiprows=1, unpack=True)
        # Merge adjacent bins until each has an acceptable Poisson error.
        time, dTime, exposure, counts = resampleCount(time, dTime, exposure, counts)
        errcounts = np.sqrt(counts)
        surfaceFermi = 10000 # in cm^2
        flux = counts*surfaceFermi/(exposure)
        errflux = errcounts*surfaceFermi/(exposure)
        time = utils.MJD_to_met(time)
        # Durations are converted via a difference so they stay time spans.
        dTime = utils.MJD_to_met(dTime)-utils.MJD_to_met(0.)
        return time, dTime, flux, errflux
#Functions in order to resample counts on apperture photometry to achieve a given error
def resampleCount(time, dTime, exposure, counts, errorObj=0.1):
    """Merge adjacent aperture-photometry bins until each bin's relative
    Poisson error (sqrt(N)/N) is no larger than ``errorObj``.

    The input arrays are modified in place while merging; the rebinned
    (time, dTime, exposure, counts) arrays are returned.
    """
    def relative_error(n):
        # sqrt(N)/N — the relative Poisson uncertainty of a count.
        return np.sqrt(float(n)) / float(n)

    def merged(idx):
        # Fold bin ``idx`` into bin ``idx - 1``, re-center the merged bin
        # time, then drop bin ``idx`` from every array.
        counts[idx - 1] += counts[idx]
        time[idx - 1] -= dTime[idx - 1] / 2.
        dTime[idx - 1] += dTime[idx]
        time[idx - 1] += dTime[idx - 1] / 2.
        exposure[idx - 1] += exposure[idx]
        return (np.delete(time, (idx), axis=0),
                np.delete(dTime, (idx), axis=0),
                np.delete(exposure, (idx), axis=0),
                np.delete(counts, (idx), axis=0))

    idx = 1
    while idx < len(time):
        if relative_error(counts[idx - 1]) > errorObj:
            time, dTime, exposure, counts = merged(idx)
        else:
            idx += 1
    # The last bin can still be too noisy: fold it into its predecessor.
    last = len(time) - 1
    if len(time) > 1 and relative_error(counts[last]) > errorObj:
        time, dTime, exposure, counts = merged(last)
    return time, dTime, exposure, counts
|
|
import requests
from icalendar import Calendar, Event
import icalendar
import pytz
import sys
import datetime
from lxml import html
import getpass
class Course:
    """A course with its enrolled sections.

    The old class-level ``name``/``code``/``sections`` defaults were
    removed: a class-level mutable list is a shared-state pitfall, and
    __init__ always assigns fresh instance attributes anyway.
    """

    def __init__(self, n, c):
        self.name = n          # human-readable course title
        self.code = c          # e.g. "CS 1027"
        self.sections = []     # list of Section objects; fresh per instance
class Section:
    """One scheduled meeting (lecture/lab/tutorial) of a course.

    Class-level attribute defaults were removed — __init__ always sets
    every instance attribute, so they only risked shared-state bugs.
    """

    def __init__(self, _name, _location, _startDateTime, _endDateTime, _lastClass):
        self.name = _name                    # e.g. "LEC", "LAB"
        self.location = _location            # room / building string
        self.startDateTime = _startDateTime  # first meeting start (aware dt)
        self.endDateTime = _endDateTime      # first meeting end (aware dt)
        self.lastClass = _lastClass          # recurrence end date

    def __repr__(self):
        return self.name + ", Location: " + self.location + " start Time: " + str(self.startDateTime) + " End Time: " + str(self.endDateTime)
def uwoDaytoWeekDay(day):
    """Map a UWO two-letter day code to an ISO weekday number (Mo=1..Fr=5).

    Returns None for any unrecognized code, matching the original
    fall-through behaviour.
    """
    return {"Mo": 1, "Tu": 2, "We": 3, "Th": 4, "Fr": 5}.get(day)
def getScheduleHTML(userName, password):
    """Log in to the UWO student centre and return the schedule page HTML."""
    LoginURL = 'https://student.uwo.ca/psp/heprdweb/EMPLOYEE/HRMS/c/UWO_WISG.WSA_STDNT_CENTER.GBL&languageCd=ENG'
    schedURL = 'https://student.uwo.ca/psc/heprdweb/EMPLOYEE/HRMS/c/SA_LEARNER_SERVICES.SSR_SSENRL_LIST.GBL'
    # POST variables the login web form sends.
    payload = {
        'httpPort2': '',
        'timezoneOffset2': 0,
        # Server expects the username in uppercase for some reason.
        'userid': userName.upper(),
        'pwd': password,
        'Submit': 'Sign In',
    }
    # A session keeps track of cookies across requests.
    session = requests.Session()
    # Log in, then fetch the schedule page.
    session.post(LoginURL, params=payload)
    response = session.post(schedURL, {'Page': 'SSR_SSENRL_LIST'})
    return response.text
def parseSchedule(schedHTML):
    """Parse the student-centre schedule HTML into a list of Course objects,
    each carrying Section meeting times localized to US/Eastern."""
    courseList = []
    EST = pytz.timezone('US/Eastern')
    tree = html.fromstring(schedHTML)
    courses = tree.xpath("//div[starts-with(@id,'win0divDERIVED_REGFRM1_DESCR20$')]")
    #Parse the raw HTML into objects
    for course in courses:
        #Get the course title; format is "CODE - Title"
        name = course.xpath("descendant::td[@class = 'PAGROUPDIVIDER']")
        dashLoc = name[0].text.find('-')
        c = Course(name[0].text[dashLoc+2:].title(), name[0].text[:dashLoc])
        #Find the sections we're signed up for
        sections = course.xpath("descendant::tr[starts-with(@id,'trCLASS_MTG_VW$')]")
        typeClass = ""
        for s in sections:
            col = s.xpath("descendant::span[@class = 'PSEDITBOX_DISPONLY']")
            #We only want to grab the type of class if there is one, otherwise its the same as the last row
            if col[1].text.strip() != "":
                typeClass = col[1].text
            #This the start and end date of the term, not the course
            startDate = datetime.datetime.strptime(col[len(col)-1].text[0:10], "%Y/%m/%d")
            recurEndDate = datetime.datetime.strptime(col[len(col)-1].text[13:23], "%Y/%m/%d")
            #Get The times, e.g. "Mo 9:30AM - 10:30AM"
            times = col[len(col)-3].text;
            #Get the day of the week
            day = uwoDaytoWeekDay(times[0:2])
            #Check to see if the start times hour is 1 digit and pad it
            # (the slice offsets below depend on that padding)
            if(times[4] == ':'):
                startTime = '0' + times[3:9];
                if(times[13] == ':'):
                    endTime = '0' + times[12:18]
                else:
                    endTime = times[12:19]
            else:
                startTime = times[3:10];
                if(times[14] == ':'):
                    endTime = '0' + times[13:19]
                else:
                    endTime = times[13:20]
            startTime = datetime.datetime.strptime(startTime, "%I:%M%p")
            startTime = EST.localize(startTime)
            endTime = datetime.datetime.strptime(endTime, "%I:%M%p")
            endTime = EST.localize(endTime)
            #adjust the start day to line up with the day the course starts(rather the when the term starts)
            while (startDate.isoweekday() != day):
                startDate = startDate + datetime.timedelta(days=1)
            #Figure out the DateTime of the course
            # (strptime with only a time yields 1900-01-01, hence "zeroed")
            zeroed = EST.localize(datetime.datetime(1900, 1, 1))
            startDateTime = startDate + (startTime - zeroed)
            startDateTime = EST.localize(startDateTime)
            endDateTime = startDateTime + (endTime-startTime)
            #get Location
            location = col[len(col)-2].text
            c.sections.append(Section(typeClass, location, startDateTime, endDateTime, recurEndDate))
        courseList.append(c)
    return courseList
def makeICal(courseList):
    """Build an iCalendar with one weekly-recurring VEVENT per section.

    :param courseList: list of Course objects from parseSchedule().
    :return: an icalendar.Calendar ready to be serialized.
    """
    # Start building the calendar
    cal = Calendar()
    # Required to be compliant with the RFC
    cal.add('prodid', '-//pargall//UWO Class Calendar//EN')
    cal.add('version', '2.0')
    tzc = icalendar.Timezone()
    tzc.add('tzid', 'US/Eastern')
    # BUG FIX: x-lic-location previously said 'Europe/Eastern' for a
    # US/Eastern timezone definition.
    tzc.add('x-lic-location', 'US/Eastern')
    #Define the timezone that UWO is in
    tzs = icalendar.TimezoneStandard()
    tzs.add('tzname', 'EST')
    tzs.add('dtstart', datetime.datetime(1970, 10, 25, 3, 0, 0))
    tzs.add('rrule', {'freq': 'yearly', 'bymonth': 10, 'byday': '-1su'})
    # BUG FIX: TZOFFSETFROM/TZOFFSETTO were inverted. Per RFC 5545,
    # TZOFFSETTO is the offset in effect while the component applies
    # (EST = UTC-5, coming from EDT = UTC-4).
    tzs.add('TZOFFSETFROM', datetime.timedelta(hours=-4))
    tzs.add('TZOFFSETTO', datetime.timedelta(hours=-5))
    tzd = icalendar.TimezoneDaylight()
    tzd.add('tzname', 'EDT')
    tzd.add('dtstart', datetime.datetime(1970, 3, 29, 2, 0, 0))
    # BUG FIX: this daylight-time rule was previously added to the STANDARD
    # component (tzs) by mistake, leaving tzd without any RRULE.
    tzd.add('rrule', {'freq': 'yearly', 'bymonth': 3, 'byday': '-1su'})
    # BUG FIX: offsets inverted here as well (EDT = UTC-4, from EST = UTC-5).
    tzd.add('TZOFFSETFROM', datetime.timedelta(hours=-5))
    tzd.add('TZOFFSETTO', datetime.timedelta(hours=-4))
    tzc.add_component(tzs)
    tzc.add_component(tzd)
    cal.add_component(tzc)
    for course in courseList:
        for section in course.sections:
            event = Event()
            # uid should be unique; course name + start time is sufficient.
            event.add('uid', course.name+section.startDateTime.isoformat())
            event.add('summary', section.name + ": " + course.name)
            event.add('description', course.code)
            event.add('location', section.location)
            event.add('dtstart', section.startDateTime)
            # Repeat weekly until the last day of classes.
            event.add('rrule', {'freq': 'weekly', 'until': section.lastClass})
            event.add('dtend', section.endDateTime)
            event.add('dtstamp', datetime.datetime.utcnow())
            cal.add_component(event)
    return cal
def writeTempFile(cal):
    """Serialize *cal* to ``classes.ics`` in a fresh temporary directory
    and return the full path of the written file."""
    import os
    import tempfile

    out_dir = tempfile.mkdtemp()
    ics_path = os.path.join(out_dir, 'classes.ics')
    with open(ics_path, 'wb') as ics_file:
        ics_file.write(cal.to_ical())
    return ics_path
def gen():
    """Interactively prompt for UWO credentials, scrape the schedule,
    build the iCal file and print its location."""
    userName = input('Enter your uwo username: ')
    password = getpass.getpass()
    schedHTML = getScheduleHTML(userName, password)
    courseList = parseSchedule(schedHTML)
    cal = makeICal(courseList)
    print("Your calendar: " + writeTempFile(cal))
|
|
"""
LICENCE
-------
Copyright 2013 by Kitware, Inc. All Rights Reserved. Please refer to
KITWARE_LICENSE.TXT for licensing information, or contact General Counsel,
Kitware, Inc., 28 Corporate Drive, Clifton Park, NY 12065.
Object for handling signal overrides and managing what signals have been seen
since override.
"""
import logging
import re
import signal
import threading
class SignalHandler(object):
    """
    Object for handling knowledge of whether a particular system signal has
    been caught for the current thread of execution.
    This object is thread safe and acts in a synchronous manner.
    """
    def __init__(self, name='SignalHandler'):
        # signum -> True for every signal caught since the last reset.
        self.__signal_caught = {}
        self.__s_lock = threading.Lock()
        # signum -> the handler that was registered before ours took over.
        self.__prev_handlers = {}
        self.__h_lock = threading.Lock()
        self._log = logging.getLogger(__name__ + '.' + name)
        # create a mapping of signal integer keys to the signal they represent
        # this may change from system to system(?), thus the creating it
        # automatically
        sig_list = [
            re.match('^SIG[A-Z]+$', e) and re.match('^SIG[A-Z]+$', e).group()
            for e in dir(signal)
        ]
        # NOTE(review): non-matching entries are None in sig_list; sorting a
        # mixed None/str list works on Python 2 but raises TypeError on
        # Python 3 — confirm the targeted interpreter.
        sig_list.sort()
        sig_list = sig_list[sig_list.count(None):]
        self._sig_map = dict([(getattr(signal, sig), sig)
                              for sig in sig_list])
    def _handle_signal(self, signum, stack):
        """
        Callback method to be registered to signal.signal for a particular
        signal to catch.
        """
        # NOTE(review): register_signal() uses _gen_signal_handle() instead;
        # nothing in this module calls this method directly.
        # print "Caught signal:", signum
        with self.__s_lock:
            self._log.debug("'%s' caught", self._sig_map[signum])
            self.__signal_caught[signum] = True
    def _gen_signal_handle(self, custom_func=None):
        """
        Generate a callback method to be registered to signal.signal for a
        particular signal to catch.
        :param custom_func: A custom signal handle function to add
            functionality to our handler.
        :type custom_func: (int, None|frame) -> None
        :return: Function handle method for registering.
        :rtype: types.FunctionType
        """
        def handle_signal(signum, stack):
            """
            Callback method to be registered to signal.signal for a particular
            signal to catch.
            :type signum: int
            :type stack: None | frame
            """
            # Record the catch first, then delegate to the custom handler.
            with self.__s_lock:
                self._log.debug("'%s' caught", self._sig_map[signum])
                self.__signal_caught[signum] = True
            if custom_func:
                custom_func(signum, stack)
        return handle_signal
    def register_signal(self, signum, custom_func=None):
        """
        Register a signal to be handled and monitored if not already
        registered.
        We will override and record the previous handler so that we may put it
        back if/when unregister that signal.
        A custom handling function may be passed to extend actions taken when
        the signal is caught. This method should be of the form of a normal
        signal handle method (see python docs). Regardless if a custom function
        is given, we will always register in our map that a handler has been
        registered for that signal, preventing another handle to be registered
        for that signal until the current one is removed, as well as still
        registering that the signal has been caught.
        If we have already registered the given signal, we return False.
        :type signum: int
        :param signum: The identifying integer value of the signal to check.
            (use signal.SIGINT, signal.SIGTERM, etc.)
        :param custom_func: Custom callback function to be called when the
            specified signal is caught. This function must take two arguments
            where the first is the integer signal identifier and the second
            is the stack frame in which the signal was caught.
        :type custom_func: (int, frame) -> None
        :return: True of we successfully registered a new signal, or False if
            the signal is already registered.
        """
        # if we haven't already registered a handler for the given signal.
        with self.__h_lock:
            if signum not in self.__prev_handlers:
                self._log.debug("Registering catch for signal %i (%s)",
                                signum, self._sig_map[signum])
                # Keep the displaced handler so unregister_signal can
                # restore it later.
                prev_handle = \
                    signal.signal(signum, self._gen_signal_handle(custom_func))
                self.__prev_handlers[signum] = prev_handle
                return True
            self._log.debug("%s already registered", self._sig_map[signum])
            return False
    def unregister_signal(self, signum):
        """
        Unregister the given signal to the previous handler we have recorded.
        This also sets whether the signal has been caught or not to False.
        If the given signal is not registered, we return False.
        :type signum: int
        :param signum: The identifying integer value of the signal to check.
            (use signal.SIGINT, signal.SIGTERM, etc.)
        return: True if we successfully unregistered a signal, or False if the
            the given signal was not registered in the first place.
        """
        with self.__h_lock:
            if signum in self.__prev_handlers:
                self._log.debug("Restoring previous handler for %s",
                                self._sig_map[signum])
                signal.signal(signum, self.__prev_handlers[signum])
                del self.__prev_handlers[signum]
                # unregister signal caught value
                self.reset_signal(signum)
                return True
            self._log.debug("%s never registered.", self._sig_map[signum])
            return False
    def signals_registered(self):
        """
        Return what signals are currently registered in this handler
        :return: what signals are currently registered in this handler
        :rtype: list of int
        """
        # NOTE(review): on Python 3 this is a dict view, not a list —
        # confirm callers do not rely on list semantics.
        return self.__prev_handlers.keys()
    def reset_signal(self, signum):
        """
        Reset our knowledge of whether a particular signal has been caught.
        This does NOT unregister any handlers.
        If the given signal is not registered, we return False.
        :type signum: int
        :param signum: The identifying integer value of the signal to check.
            (use signal.SIGINT, signal.SIGTERM, etc.)
        :return: True if the signal catch record was reset, and False if the
            given signal is not registered as being monitored.
        """
        with self.__s_lock:
            if signum in self.__signal_caught:
                self._log.debug("Resetting %s catch boolean",
                                self._sig_map[signum])
                del self.__signal_caught[signum]
                return True
            self._log.debug("%s never caught, nothing to reset",
                            self._sig_map[signum])
            return False
    def reset(self):
        """
        Reset our knowledge of what signals have been caught. This does NOT
        unregister any handlers.
        """
        with self.__s_lock:
            self._log.debug("Resetting all signal catches")
            self.__signal_caught = {}
    def is_signal_caught(self, signum):
        """
        Check if we have caught the given signal since the creation of the
        object or since the last reset. We also return false if the given
        signal isn't being monitored.
        :type signum: int
        :param signum: The identifying integer value of the signal to check.
            (use signal.SIGINT, signal.SIGTERM, etc.)
        :rtype: bool
        :return; True if the signal has been seen since creation/reset, or
            False otherwise.
        """
        return self.__signal_caught.get(signum, False)
|
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import ast
import datetime
import logging
import os
import re
import socket
import time
# We can remove ExpatError when we drop support for Python 2.6:
from xml.parsers.expat import ExpatError
from tornado import gen, httpclient
from tornado import ioloop as ioloop_module
from tornado import log as tornado_log
try:
from xml.etree import ElementTree as ET
except ImportError:
try:
from xml.etree import cElementTree as ET
except ImportError:
from xml.etree import ElementTree as ET
# Remove this when we drop Python 2.6:
ParseError = getattr(ET, 'ParseError', SyntaxError)
try:
# Prefer simplejson, if installed.
import simplejson as json
except ImportError:
import json
try:
# Python 3.X
from urllib.parse import urlencode
except ImportError:
# Python 2.X
from urllib import urlencode
try:
# Python 3.X
import html.entities as htmlentities
except ImportError:
# Python 2.X
import htmlentitydefs as htmlentities
try:
# Python 2.X
unicode_char = unichr
except NameError:
# Python 3.X
unicode_char = chr
# Ugh.
long = int
__all__ = ['Solr']
def get_version():
    """Return the package version as a dotted ``major.minor.patch`` string."""
    major, minor, patch = __version__[:3]
    return "{0}.{1}.{2}".format(major, minor, patch)
DATETIME_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d+)?Z$')
class NullHandler(logging.Handler):
    """A logging handler that silently discards every record it receives."""

    def emit(self, record):
        # Deliberately a no-op: the handler only exists so the library does
        # not trigger "no handlers could be found" warnings.
        return None
# set up logging with Tornado
LOG = tornado_log.app_log
# For debugging...
if os.environ.get("DEBUG_PYSOLR", "").lower() in ("true", "1"):
LOG.setLevel(logging.DEBUG)
stream = logging.StreamHandler()
LOG.addHandler(stream)
def is_py3():
    """Return True when running under Python 3 (no ``basestring`` builtin)."""
    try:
        basestring  # pylint: disable=pointless-statement
    except NameError:
        # ``basestring`` only exists on Python 2, so this is Python 3.
        return True
    return False
IS_PY3 = is_py3()
def force_unicode(value):
    """
    Coerce *value* to a text (Unicode) string.

    Bytestrings are decoded as UTF-8 with undecodable bytes replaced;
    any other non-text object is converted with ``str()``/``unicode()``.
    """
    if IS_PY3:
        # Python 3.X
        if isinstance(value, bytes):
            return value.decode('utf-8', errors='replace')
        if isinstance(value, str):
            return value
        return str(value)

    # Python 2.X
    if isinstance(value, str):
        return value.decode('utf-8', 'replace')
    if not isinstance(value, basestring):
        return unicode(value)
    return value
def force_bytes(value):
    """
    Coerce *value* to a bytestring, UTF-8-encoding text if needed.

    Values that are already bytes (or non-string objects) pass through
    unchanged.
    """
    if IS_PY3:
        if isinstance(value, str):
            return value.encode('utf-8', 'backslashreplace')
        return value

    # Python 2.X
    if isinstance(value, unicode):
        return value.encode('utf-8')
    return value
def unescape_html(text):
    """
    Replace HTML/XML character references and entities in *text* with the
    characters they denote.

    @param text The HTML (or XML) source text.
    @return The plain text, as a Unicode string, if necessary.

    Source: http://effbot.org/zone/re-sub.htm#unescape-html
    """
    def _replace_entity(match):
        entity = match.group(0)
        if entity.startswith("&#"):
            # numeric character reference (hexadecimal or decimal)
            try:
                if entity.startswith("&#x"):
                    return unicode_char(int(entity[3:-1], 16))
                return unicode_char(int(entity[2:-1]))
            except ValueError:
                pass
        else:
            # named entity
            try:
                entity = unicode_char(htmlentities.name2codepoint[entity[1:-1]])
            except KeyError:
                pass
        return entity  # leave as is

    return re.sub(r"&#?\w+;", _replace_entity, text)
def safe_urlencode(params, doseq=0):
    """
    UTF-8-safe version of urlencode.
    The stdlib urlencode prior to Python 3.x chokes on UTF-8 values
    which can't fail down to ascii.
    """
    if IS_PY3:
        return urlencode(params, doseq)
    if hasattr(params, "items"):
        params = params.items()
    new_params = list()
    for k, v in params:
        # Keys are encoded directly; values go through force_bytes so that
        # sequences of values are converted element-by-element.
        k = k.encode("utf-8")
        if isinstance(v, (list, tuple)):
            new_params.append((k, [force_bytes(i) for i in v]))
        else:
            new_params.append((k, force_bytes(v)))
    return urlencode(new_params, doseq)
def is_valid_xml_char_ordinal(i):
    """
    Tell whether the character with ordinal *i* may appear in an XML document.

    XML standard defines a valid char as::

        Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
    """
    if i < 0x20:
        # Below U+0020 only tab, newline and carriage return are legal.
        return i in (0x9, 0xA, 0xD)
    return i <= 0xD7FF or 0xE000 <= i <= 0xFFFD or 0x10000 <= i <= 0x10FFFF
def clean_xml_string(s):
    """
    Return *s* with every character that XML forbids removed.

    Solution was found there::

        http://stackoverflow.com/questions/8733233/filtering-out-certain-bytes-in-python
    """
    valid_chars = (ch for ch in s if is_valid_xml_char_ordinal(ord(ch)))
    return ''.join(valid_chars)
class SolrError(Exception):
    """Raised when Solr reports an error or a request cannot be completed."""
class Results(object):
    """
    Default results class for wrapping decoded (from JSON) solr responses.

    Required ``decoded`` argument must be a Solr response dictionary.
    Individual documents can be retrieved either through the
    :attr:`Results.docs` attribute, through indexed access, or through
    iteration.  Truth-testing an instance reports whether any documents
    were found, and ``len()`` gives the number of returned docs.

    Example::

        results = Results({
            'response': {
                'docs': [{'id': 1}, {'id': 2}, {'id': 3}],
                'numFound': 3,
            }
        })

        # Iterating the object iterates its "docs":
        for doc in results:
            print(str(doc))
        list(results) == results.docs

        # Indexed access also proxies to "docs":
        results[1] == results.docs[1]

        # Truthiness follows the hit count:
        bool(results) == True
        bool(Results({})) == False

    **Additional Response Keys**

    The following additional data members are available: hits, debug,
    highlighting, facets, spellcheck, stats, qtime, grouped,
    nextCursorMark.

    You may add more attributes by extending the :class:`Results` class.
    For example::

        class CustomResults(pysolrtornado.Results):
            def __init__(self, decoded):
                super(CustomResults, self).__init__(decoded)
                self.some_new_attribute = decoded.get('not_covered_key', None)
    """

    def __init__(self, decoded):
        # main response part of decoded Solr response
        main_response = decoded.get('response') or {}
        self.docs = main_response.get('docs', ())
        self.hits = int(main_response.get('numFound', 0))

        # other response metadata
        self.debug = decoded.get('debug', {})
        self.highlighting = decoded.get('highlighting', {})
        self.facets = decoded.get('facet_counts', {})
        self.spellcheck = decoded.get('spellcheck', {})
        self.stats = decoded.get('stats', {})
        self.qtime = decoded.get('responseHeader', {}).get('QTime', None)
        self.grouped = decoded.get('grouped', {})
        self.nextCursorMark = decoded.get('nextCursorMark', None)

    def __bool__(self):
        return self.hits > 0

    def __len__(self):
        return len(self.docs)

    def __iter__(self):
        return iter(self.docs)

    def __getitem__(self, i):
        return self.docs[i]
class Solr(object):
    """
    The main object for working with Solr.

    Optionally accepts ``decoder`` for an alternate JSON decoder instance.
    Default is ``json.JSONDecoder()``.

    Optionally accepts ``timeout`` for wait seconds until giving up on a
    request. Default is ``60`` seconds.

    Optionally accepts ``ioloop`` used for the AsyncHTTPClient. **But you should really include it
    because I don't know if it will work without being given that... TBD.**

    Optionally accepts ``results_cls`` that specifies class of results object
    returned by ``.search()`` and ``.more_like_this()`` methods.
    Default is ``pysolr.Results``.

    Usage::

        solr = pysolr.Solr('http://localhost:8983/solr')
        # With a 10 second timeout.
        solr = pysolr.Solr('http://localhost:8983/solr', timeout=10)

        # with a dict as a default results class instead of pysolr.Results
        solr = pysolr.Solr('http://localhost:8983/solr', results_cls=dict)
    """

    # Error messages for Solr._send_request()
    # They're class-level so they may be translated easier.
    _FETCH_VALUE_ERROR = 'URL is empty or protocol missing: {}'
    _FETCH_UNICODE_ERROR = 'URL is too long: {}'
    _FETCH_SOCKET_ERROR = 'Socket error (DNS?) connecting to {}'
    _FETCH_KEY_ERROR = 'Unknown HTTP method "{}"'
    _FETCH_CONN_ERROR = 'Connection error with {}'

    def __init__(self, url, decoder=None, timeout=None, ioloop=None, results_cls=None):
        self.decoder = decoder or json.JSONDecoder()
        self.url = url
        self.timeout = timeout or 60
        self.log = self._get_log()
        self._ioloop = ioloop or ioloop_module.IOLoop.instance()
        self._client = httpclient.AsyncHTTPClient(self._ioloop)
        self.results_cls = results_cls or Results

    def _get_log(self):
        """Return the logger used by this instance (module-level by default)."""
        return LOG

    def _create_full_url(self, path=''):
        """Join *path* onto the base URL, avoiding duplicated slashes."""
        if len(path):
            return '/'.join([self.url.rstrip('/'), path.lstrip('/')])

        # No path? No problem.
        return self.url

    @gen.coroutine
    def _send_request(self, method, path='', body=None, headers=None, files=None):
        """
        Run an HTTP request against Solr and return the response body as text.

        :raises SolrError: for any transport- or Solr-level failure.
        :raises NotImplementedError: if ``files`` is given (not ported yet).
        """
        url = self._create_full_url(path)
        method = method.upper()
        log_body = body

        if headers is None:
            headers = {}

        if log_body is None:
            log_body = ''
        elif not isinstance(log_body, str):
            log_body = repr(body)

        self.log.debug("Starting request to '%s' (%s) with body '%s'...",
                       url, method, log_body[:10])
        start_time = time.time()

        if files is not None:
            raise NotImplementedError('The "files" parameter in _send_request() does not work in Tornado yet')

        # actual Tornado request
        # Everything except the body can be Unicode. The body must be
        # encoded to bytes to work properly on Py3.
        bytes_body = body
        if bytes_body is not None:
            bytes_body = force_bytes(body)

        # prepare the request
        request = httpclient.HTTPRequest(url, method=method, headers=headers, body=bytes_body,
                                         request_timeout=self.timeout)

        try:
            # run the request
            resp = yield self._client.fetch(request)
        except UnicodeError:
            # when the URL is empty or too long or something
            # NOTE: must come before ValueError, since UnicodeError is a subclass of ValueError
            raise SolrError(Solr._FETCH_UNICODE_ERROR.format(url))
        except ValueError:
            # when the URL is empty or the HTTP/HTTPS part is missing
            raise SolrError(Solr._FETCH_VALUE_ERROR.format(url))
        except socket.gaierror:
            # DNS doesn't resolve or simlar
            raise SolrError(Solr._FETCH_SOCKET_ERROR.format(url))
        except KeyError:
            # unknown HTTP method
            raise SolrError(Solr._FETCH_KEY_ERROR.format(method))
        except ConnectionError:
            # could be various things
            # NOTE(review): ``ConnectionError`` is a Python 3 builtin; under
            # Python 2 evaluating this clause raises NameError. Confirm
            # whether Python 2 support is still required here.
            raise SolrError(Solr._FETCH_CONN_ERROR.format(url))
        except httpclient.HTTPError as the_error:
            # Solr (or the HTTP layer) reported an error. ``the_error.response``
            # is None when no HTTP response was received at all (e.g. code 599
            # on a timeout), so guard the attribute access instead of masking
            # the real failure with an AttributeError.
            if the_error.response is not None:
                reason = the_error.response.reason
            else:
                reason = str(the_error)
            error_message = '{}: {}'.format(the_error.code, reason)
            self.log.error(error_message, extra={'data': {'headers': the_error.response,
                                                          'response': the_error.response}})
            raise SolrError(error_message)

        end_time = time.time()
        self.log.info("Finished '%s' (%s) with body '%s' in %0.3f seconds.",
                      url, method, log_body[:10], end_time - start_time)

        return force_unicode(resp.body)

    @gen.coroutine
    def _select(self, params):
        """Run a ``select`` query, switching to POST when the query is long."""
        # specify json encoding of results
        params['wt'] = 'json'
        params_encoded = safe_urlencode(params, True)

        if len(params_encoded) < 1024:
            # Typical case.
            path = 'select/?%s' % params_encoded
            return (yield self._send_request('get', path))
        else:
            # Handles very long queries by submitting as a POST.
            path = 'select/'
            headers = {
                'Content-type': 'application/x-www-form-urlencoded; charset=utf-8',
            }
            return (yield self._send_request('post', path, body=params_encoded, headers=headers))

    @gen.coroutine
    def _mlt(self, params):
        """Run a MoreLikeThis request against the ``mlt`` handler."""
        # specify json encoding of results
        params['wt'] = 'json'
        path = 'mlt/?%s' % safe_urlencode(params, True)
        return (yield self._send_request('get', path))

    @gen.coroutine
    def _suggest_terms(self, params):
        """Run a terms request against the ``terms`` handler."""
        # specify json encoding of results
        params['wt'] = 'json'
        path = 'terms/?%s' % safe_urlencode(params, True)
        return (yield self._send_request('get', path))

    @gen.coroutine
    def _update(self, message, clean_ctrl_chars=True, commit=True, softCommit=False, waitFlush=None, waitSearcher=None):
        """
        Posts the given xml message to http://<self.url>/update and
        returns the result.

        Passing ``clean_ctrl_chars`` as False will prevent the message from
        being cleaned of control characters (default True). This is done by
        default because these characters would cause Solr to fail to parse
        the XML. Only pass False if you're positive your data is clean.
        """
        path = 'update/'

        # Per http://wiki.apache.org/solr/UpdateXmlMessages, we can append a
        # ``commit=true`` to the URL and have the commit happen without a
        # second request.
        query_vars = []

        # NOTE(review): because ``commit`` defaults to True (not None), the
        # ``softCommit`` branch is only reachable when a caller explicitly
        # passes commit=None — confirm that shadowing is intended.
        if commit is not None:
            query_vars.append('commit=%s' % str(bool(commit)).lower())
        elif softCommit is not None:
            query_vars.append('softCommit=%s' % str(bool(softCommit)).lower())

        if waitFlush is not None:
            query_vars.append('waitFlush=%s' % str(bool(waitFlush)).lower())

        if waitSearcher is not None:
            query_vars.append('waitSearcher=%s' % str(bool(waitSearcher)).lower())

        if query_vars:
            path = '%s?%s' % (path, '&'.join(query_vars))

        # Clean the message of ctrl characters.
        if clean_ctrl_chars:
            message = sanitize(message)

        return (yield self._send_request('post', path, message, {'Content-type': 'text/xml; charset=utf-8'}))

    # TODO: convert to @staticmethod
    def _extract_error(self, resp):
        """
        Extract the actual error message from a solr response.
        """
        return '[Reason: {}]'.format(resp.reason)

    # TODO: convert to @staticmethod
    def _scrape_response(self, headers, response):
        """
        Scrape the html response.

        Returns a ``(reason, full_html)`` pair; either element may be
        empty/None when it could not be determined.
        """
        # identify the responding server
        server_type = None
        server_string = headers.get('server', '')

        if server_string and 'jetty' in server_string.lower():
            server_type = 'jetty'

        if server_string and 'coyote' in server_string.lower():
            server_type = 'tomcat'

        reason = None
        full_html = ''
        dom_tree = None

        # In Python3, response can be made of bytes
        if IS_PY3 and hasattr(response, 'decode'):
            response = response.decode()

        if response.startswith('<?xml'):
            # Try a strict XML parse
            try:
                soup = ET.fromstring(response)

                reason_node = soup.find('lst[@name="error"]/str[@name="msg"]')
                tb_node = soup.find('lst[@name="error"]/str[@name="trace"]')
                if reason_node is not None:
                    full_html = reason = reason_node.text.strip()
                if tb_node is not None:
                    full_html = tb_node.text.strip()
                if reason is None:
                    reason = full_html

                # Since we had a precise match, we'll return the results now:
                if reason and full_html:
                    return reason, full_html
            except (ParseError, ExpatError):
                # XML parsing error, so we'll let the more liberal code handle it.
                pass

        if server_type == 'tomcat':
            # Tomcat doesn't produce a valid XML response or consistent HTML:
            m = re.search(r'<(h1)[^>]*>\s*(.+?)\s*</\1>', response, re.IGNORECASE)
            if m:
                reason = m.group(2)
            else:
                full_html = "%s" % response
        else:
            # Let's assume others do produce a valid XML response
            try:
                dom_tree = ET.fromstring(response)
                reason_node = None

                # html page might be different for every server
                if server_type == 'jetty':
                    reason_node = dom_tree.find('body/pre')
                else:
                    reason_node = dom_tree.find('head/title')

                if reason_node is not None:
                    reason = reason_node.text

                if reason is None:
                    full_html = ET.tostring(dom_tree)
            except (SyntaxError, ExpatError):
                full_html = "%s" % response

        full_html = force_unicode(full_html)
        full_html = full_html.replace('\n', '')
        full_html = full_html.replace('\r', '')
        full_html = full_html.replace('<br/>', '')
        full_html = full_html.replace('<br />', '')
        full_html = full_html.strip()
        return reason, full_html

    # Conversion #############################################################

    # TODO: convert to @staticmethod
    def _from_python(self, value):
        """
        Converts python values to a form suitable for insertion into the xml
        we send to solr.
        """
        if hasattr(value, 'strftime'):
            # datetime-likes: date-only objects have no ``hour`` attribute
            if hasattr(value, 'hour'):
                value = "%sZ" % value.isoformat()
            else:
                value = "%sT00:00:00Z" % value.isoformat()
        elif isinstance(value, bool):
            if value:
                value = 'true'
            else:
                value = 'false'
        else:
            if IS_PY3:
                # Python 3.X
                if isinstance(value, bytes):
                    value = str(value, errors='replace')
            else:
                # Python 2.X
                if isinstance(value, str):
                    value = unicode(value, errors='replace')

            value = "{0}".format(value)

        return clean_xml_string(value)

    # TODO: convert to @staticmethod
    def _to_python(self, value):
        """
        Converts values from Solr to native Python values.
        """
        if isinstance(value, (int, float, long, complex)):
            return value

        if isinstance(value, (list, tuple)):
            value = value[0]

        if value == 'true':
            return True
        elif value == 'false':
            return False

        is_string = False

        if IS_PY3:
            if isinstance(value, bytes):
                value = force_unicode(value)

            if isinstance(value, str):
                is_string = True
        else:
            if isinstance(value, str):
                value = force_unicode(value)

            if isinstance(value, basestring):
                is_string = True

        if is_string:
            possible_datetime = DATETIME_REGEX.search(value)

            if possible_datetime:
                date_values = possible_datetime.groupdict()

                for dk, dv in date_values.items():
                    date_values[dk] = int(dv)

                return datetime.datetime(date_values['year'], date_values['month'], date_values['day'], date_values['hour'], date_values['minute'], date_values['second'])

        try:
            # This is slightly gross but it's hard to tell otherwise what the
            # string's original type might have been.
            return ast.literal_eval(value)
        except (ValueError, SyntaxError):
            # If it fails, continue on.
            pass

        return value

    # TODO: convert to @staticmethod
    def _is_null_value(self, value):
        """
        Check if a given value is ``null``.

        Criteria for this is based on values that shouldn't be included
        in the Solr ``add`` request at all.
        """
        if value is None:
            return True

        if IS_PY3:
            # Python 3.X
            if isinstance(value, str) and len(value) == 0:
                return True
        else:
            # Python 2.X
            if isinstance(value, basestring) and len(value) == 0:
                return True

        # TODO: This should probably be removed when solved in core Solr level?
        return False

    # API Methods ############################################################

    @gen.coroutine
    def search(self, q, **kwargs):
        """
        Performs a search and returns the results.

        Requires a ``q`` for a string version of the query to run.

        Optionally accepts ``**kwargs`` for additional options to be passed
        through the Solr URL.

        Using the ``df`` keyword argument (specifying a default field) is strongly recommended, and
        indeed required for Solr 5.

        Returns ``self.results_cls`` class object (defaults to
        ``pysolr.Results``)

        Usage::

            # All docs.
            results = solr.search('*:*')

            # Search with highlighting.
            results = solr.search('ponies', **{
                'hl': 'true',
                'hl.fragsize': 10,
            })
        """
        params = {'q': q}
        params.update(kwargs)
        response = yield self._select(params)
        decoded = self.decoder.decode(response)

        self.log.debug(
            "Found '%s' search results.",
            # cover both cases: there is no response key or value is None
            (decoded.get('response', {}) or {}).get('numFound', 0)
        )
        return self.results_cls(decoded)

    @gen.coroutine
    def more_like_this(self, q, mltfl, **kwargs):
        """
        Finds and returns results similar to the provided query.

        Returns ``self.results_cls`` class object (defaults to
        ``pysolr.Results``)

        Requires Solr 1.3+.

        Usage::

            similar = solr.more_like_this('id:doc_234', 'text')
        """
        params = {
            'q': q,
            'mlt.fl': mltfl,
        }
        params.update(kwargs)
        response = yield self._mlt(params)
        decoded = self.decoder.decode(response)

        self.log.debug(
            "Found '%s' MLT results.",
            # cover both cases: there is no response key or value is None
            (decoded.get('response', {}) or {}).get('numFound', 0)
        )
        return self.results_cls(decoded)

    @gen.coroutine
    def suggest_terms(self, fields, prefix, **kwargs):
        """
        Accepts a list of field names and a prefix

        Returns a dictionary keyed on field name containing a list of
        ``(term, count)`` pairs

        Requires Solr 1.4+.
        """
        params = {
            'terms.fl': fields,
            'terms.prefix': prefix,
        }
        params.update(kwargs)
        response = yield self._suggest_terms(params)
        result = self.decoder.decode(response)
        terms = result.get("terms", {})
        res = {}

        # in Solr 1.x the value of terms is a flat list:
        #   ["field_name", ["dance",23,"dancers",10,"dancing",8,"dancer",6]]
        #
        # in Solr 3.x the value of terms is a dict:
        #   {"field_name": ["dance",23,"dancers",10,"dancing",8,"dancer",6]}
        if isinstance(terms, (list, tuple)):
            terms = dict(zip(terms[0::2], terms[1::2]))

        for field, values in terms.items():
            # values alternates term, count, term, count, ...; pair them up
            tmp = list()

            while values:
                tmp.append((values.pop(0), values.pop(0)))

            res[field] = tmp

        self.log.debug("Found '%d' Term suggestions results.", sum(len(j) for i, j in res.items()))
        return res

    # TODO: convert to @staticmethod
    def _build_doc(self, doc, boost=None, fieldUpdates=None):
        """Serialize one document dict into a ``<doc>`` XML element."""
        doc_elem = ET.Element('doc')

        for key, value in doc.items():
            if key == 'boost':
                doc_elem.set('boost', force_unicode(value))
                continue

            # To avoid multiple code-paths we'd like to treat all of our values as iterables:
            if isinstance(value, (list, tuple)):
                values = value
            else:
                values = (value, )

            for bit in values:
                if self._is_null_value(bit):
                    continue

                attrs = {'name': key}

                if fieldUpdates and key in fieldUpdates:
                    attrs['update'] = fieldUpdates[key]

                if boost and key in boost:
                    attrs['boost'] = force_unicode(boost[key])

                field = ET.Element('field', **attrs)
                field.text = self._from_python(bit)

                doc_elem.append(field)

        return doc_elem

    @gen.coroutine
    def add(self, docs, boost=None, fieldUpdates=None, commit=None, softCommit=None, commitWithin=None, waitFlush=None, waitSearcher=None):
        """
        Adds or updates documents.

        Requires ``docs``, which is a list of dictionaries. Each key is the
        field name and each value is the value to index.

        Optionally accepts ``commit``. Default is ``True``.

        Optionally accepts ``softCommit``. Default is ``False``.

        Optionally accepts ``boost``. Default is ``None``.

        Optionally accepts ``fieldUpdates``. Default is ``None``.

        Optionally accepts ``commitWithin``. Default is ``None``.

        Optionally accepts ``waitFlush``. Default is ``None``.

        Optionally accepts ``waitSearcher``. Default is ``None``.

        Usage::

            solr.add([
                {
                    "id": "doc_1",
                    "title": "A test document",
                },
                {
                    "id": "doc_2",
                    "title": "The Banana: Tasty or Dangerous?",
                },
            ])
        """
        commit = True if commit is None else commit
        softCommit = False if softCommit is None else softCommit
        start_time = time.time()
        self.log.debug("Starting to build add request...")
        message = ET.Element('add')

        if commitWithin:
            # ElementTree attribute values must be strings; callers usually
            # pass commitWithin as an int (milliseconds), so convert it here
            # instead of failing later inside ET.tostring().
            message.set('commitWithin', str(commitWithin))

        for doc in docs:
            message.append(self._build_doc(doc, boost=boost, fieldUpdates=fieldUpdates))

        # This returns a bytestring. Ugh.
        m = ET.tostring(message, encoding='utf-8')
        # Convert back to Unicode please.
        m = force_unicode(m)

        end_time = time.time()
        self.log.debug("Built add request of %s docs in %0.2f seconds.", len(message), end_time - start_time)
        return (yield self._update(m, commit=commit, softCommit=softCommit, waitFlush=waitFlush, waitSearcher=waitSearcher))

    @gen.coroutine
    def delete(self, id=None, q=None, commit=True, waitFlush=None, waitSearcher=None):  # pylint: disable=redefined-builtin
        """
        Deletes documents.

        Requires *either* ``id`` or ``query``. ``id`` is if you know the
        specific document id to remove. ``query`` is a Lucene-style query
        indicating a collection of documents to delete.

        Optionally accepts ``commit``. Default is ``True``.

        Optionally accepts ``waitFlush``. Default is ``None``.

        Optionally accepts ``waitSearcher``. Default is ``None``.

        Usage::

            solr.delete(id='doc_12')
            solr.delete(q='*:*')
        """
        if id is None and q is None:
            raise ValueError('You must specify "id" or "q".')
        elif id is not None and q is not None:
            raise ValueError('You many only specify "id" OR "q", not both.')
        elif id is not None:
            m = '<delete><id>%s</id></delete>' % id
        elif q is not None:
            m = '<delete><query>%s</query></delete>' % q

        return (yield self._update(m, commit=commit, waitFlush=waitFlush, waitSearcher=waitSearcher))

    @gen.coroutine
    def commit(self, softCommit=False, waitFlush=None, waitSearcher=None, expungeDeletes=None):
        """
        Forces Solr to write the index data to disk.

        Optionally accepts ``expungeDeletes``. Default is ``None``.

        Optionally accepts ``waitFlush``. Default is ``None``.

        Optionally accepts ``waitSearcher``. Default is ``None``.

        Optionally accepts ``softCommit``. Default is ``False``.

        Usage::

            solr.commit()
        """
        if expungeDeletes is not None:
            msg = '<commit expungeDeletes="%s" />' % str(bool(expungeDeletes)).lower()
        else:
            msg = '<commit />'

        return (yield self._update(msg, softCommit=softCommit, waitFlush=waitFlush, waitSearcher=waitSearcher))

    @gen.coroutine
    def optimize(self, waitFlush=None, waitSearcher=None, maxSegments=None):
        """
        Tells Solr to streamline the number of segments used, essentially a
        defragmentation operation.

        Optionally accepts ``maxSegments``. Default is ``None``.

        Optionally accepts ``waitFlush``. Default is ``None``.

        Optionally accepts ``waitSearcher``. Default is ``None``.

        Usage::

            solr.optimize()
        """
        if maxSegments:
            msg = '<optimize maxSegments="%d" />' % maxSegments
        else:
            msg = '<optimize />'

        return (yield self._update(msg, waitFlush=waitFlush, waitSearcher=waitSearcher))

    def extract(self, file_obj, extractOnly=True, **kwargs):
        """
        .. warning:: This method is not implemented yet in ``pysolr-tornado``.

        POSTs a file to the Solr ExtractingRequestHandler so rich content can
        be processed using Apache Tika. See the Solr wiki for details:

            http://wiki.apache.org/solr/ExtractingRequestHandler

        The ExtractingRequestHandler has a very simple model: it extracts
        contents and metadata from the uploaded file and inserts it directly
        into the index. This is rarely useful as it allows no way to store
        additional data or otherwise customize the record. Instead, by default
        we'll use the extract-only mode to extract the data without indexing it
        so the caller has the opportunity to process it as appropriate; call
        with ``extractOnly=False`` if you want to insert with no additional
        processing.

        Returns None if metadata cannot be extracted; otherwise returns a
        dictionary containing at least two keys:

        :contents:
            Extracted full-text content, if applicable
        :metadata:
            key:value pairs of text strings
        """
        raise NotImplementedError('extract() has not been ported to Tornado yet')
        # The original (blocking) implementation is kept below as a porting
        # reference:
        #if not hasattr(file_obj, "name"):
            #raise ValueError("extract() requires file-like objects which have a defined name property")
        #params = {
            #"extractOnly": "true" if extractOnly else "false",
            #"lowernames": "true",
            #"wt": "json",
        #}
        #params.update(kwargs)
        #try:
            ## We'll provide the file using its true name as Tika may use that
            ## as a file type hint:
            #resp = self._send_request('post', 'update/extract',
                                      #body=params,
                                      #files={'file': (file_obj.name, file_obj)})
        #except (IOError, SolrError) as err:
            #self.log.error("Failed to extract document metadata: %s", err,
                           #exc_info=True)
            #raise
        #try:
            #data = json.loads(resp)
        #except ValueError as err:
            #self.log.error("Failed to load JSON response: %s", err,
                           #exc_info=True)
            #raise
        #data['contents'] = data.pop(file_obj.name, None)
        #data['metadata'] = metadata = {}
        #raw_metadata = data.pop("%s_metadata" % file_obj.name, None)
        #if raw_metadata:
            ## The raw format is somewhat annoying: it's a flat list of
            ## alternating keys and value lists
            #while raw_metadata:
                #metadata[raw_metadata.pop()] = raw_metadata.pop()
        #return data
class SolrCoreAdmin(object):
    """
    Handles core admin operations: see http://wiki.apache.org/solr/CoreAdmin

    Operations offered by Solr are:
       1. STATUS
       2. CREATE
       3. RELOAD
       4. RENAME
       5. ALIAS
       6. SWAP
       7. UNLOAD
       8. LOAD (not currently implemented)
    """

    def __init__(self, url, *args, **kwargs):
        super(SolrCoreAdmin, self).__init__(*args, **kwargs)
        self.url = url

    def _get_url(self, url, params=None, headers=None):
        """Issue a blocking admin request to *url*; return the body as text."""
        if params is None:
            params = {}
        if headers is None:
            headers = {}

        sync_client = httpclient.HTTPClient()
        try:
            request = httpclient.HTTPRequest(url,
                                             headers=headers,
                                             body=safe_urlencode(params),
                                             allow_nonstandard_methods=True)
            response = sync_client.fetch(request)
        finally:
            # Always release the one-shot client, even on fetch errors.
            sync_client.close()

        return force_unicode(response.body)

    def status(self, core=None):
        """http://wiki.apache.org/solr/CoreAdmin#head-9be76f5a459882c5c093a7a1456e98bea7723953"""
        params = {'action': 'STATUS'}
        if core is not None:
            params['core'] = core
        return self._get_url(self.url, params=params)

    def create(self, name, instance_dir=None, config='solrconfig.xml', schema='schema.xml'):
        """http://wiki.apache.org/solr/CoreAdmin#head-7ca1b98a9df8b8ca0dcfbfc49940ed5ac98c4a08"""
        params = {
            'action': 'CREATE',
            'name': name,
            'config': config,
            'schema': schema,
            # The instance directory defaults to the core name.
            'instanceDir': name if instance_dir is None else instance_dir,
        }
        return self._get_url(self.url, params=params)

    def reload(self, core):
        """http://wiki.apache.org/solr/CoreAdmin#head-3f125034c6a64611779442539812067b8b430930"""
        return self._get_url(self.url, params={'action': 'RELOAD',
                                               'core': core})

    def rename(self, core, other):
        """http://wiki.apache.org/solr/CoreAdmin#head-9473bee1abed39e8583ba45ef993bebb468e3afe"""
        return self._get_url(self.url, params={'action': 'RENAME',
                                               'core': core,
                                               'other': other})

    def swap(self, core, other):
        """http://wiki.apache.org/solr/CoreAdmin#head-928b872300f1b66748c85cebb12a59bb574e501b"""
        return self._get_url(self.url, params={'action': 'SWAP',
                                               'core': core,
                                               'other': other})

    def unload(self, core):
        """http://wiki.apache.org/solr/CoreAdmin#head-f5055a885932e2c25096a8856de840b06764d143"""
        return self._get_url(self.url, params={'action': 'UNLOAD',
                                               'core': core})

    def load(self, core):
        raise NotImplementedError('Solr 1.4 and below do not support this operation.')
# Using two-tuples to preserve order.
# NOTE: tab (0x09), newline (0x0a) and carriage return (0x0d) are
# deliberately absent — they are the only control characters XML permits,
# so sanitize() keeps them.
REPLACEMENTS = (
    # Nuke nasty control characters.
    (b'\x00', b''), # Start of heading
    (b'\x01', b''), # Start of heading
    (b'\x02', b''), # Start of text
    (b'\x03', b''), # End of text
    (b'\x04', b''), # End of transmission
    (b'\x05', b''), # Enquiry
    (b'\x06', b''), # Acknowledge
    (b'\x07', b''), # Ring terminal bell
    (b'\x08', b''), # Backspace
    (b'\x0b', b''), # Vertical tab
    (b'\x0c', b''), # Form feed
    (b'\x0e', b''), # Shift out
    (b'\x0f', b''), # Shift in
    (b'\x10', b''), # Data link escape
    (b'\x11', b''), # Device control 1
    (b'\x12', b''), # Device control 2
    (b'\x13', b''), # Device control 3
    (b'\x14', b''), # Device control 4
    (b'\x15', b''), # Negative acknowledge
    (b'\x16', b''), # Synchronous idle
    (b'\x17', b''), # End of transmission block
    (b'\x18', b''), # Cancel
    (b'\x19', b''), # End of medium
    (b'\x1a', b''), # Substitute character
    (b'\x1b', b''), # Escape
    (b'\x1c', b''), # File separator
    (b'\x1d', b''), # Group separator
    (b'\x1e', b''), # Record separator
    (b'\x1f', b''), # Unit separator
)
def sanitize(data):
    """Strip XML-illegal control characters from *data* and return text."""
    cleaned = force_bytes(data)
    for bad_byte, replacement in REPLACEMENTS:
        cleaned = cleaned.replace(bad_byte, replacement)
    return force_unicode(cleaned)
|
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# Copyright 2013 NTT corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from glance.api import policy
from glance.api.v1 import controller
from glance.common import exception
from glance.common import utils
from glance.common import wsgi
import glance.openstack.common.log as logging
import glance.registry.client.v1.api as registry
LOG = logging.getLogger(__name__)
class Controller(controller.BaseController):
    def __init__(self):
        # Policy enforcer used by _enforce() to authorize membership actions.
        self.policy = policy.Enforcer()
def _check_can_access_image_members(self, context):
if context.owner is None and not context.is_admin:
raise webob.exc.HTTPUnauthorized(_("No authenticated user"))
    def _enforce(self, req, action):
        """Authorize an action against our policies"""
        try:
            # The empty dict is the policy "target": membership actions carry
            # no per-object attributes for the policy engine to inspect.
            self.policy.enforce(req.context, action, {})
        except exception.Forbidden:
            # Translate the domain-level refusal into an HTTP 403 response.
            raise webob.exc.HTTPForbidden()
def _raise_404_if_image_deleted(self, req, image_id):
image = self.get_image_meta_or_404(req, image_id)
if image['status'] == 'deleted':
msg = _("Image with identifier %s has been deleted.") % image_id
raise webob.exc.HTTPNotFound(msg)
def index(self, req, image_id):
"""
Return a list of dictionaries indicating the members of the
image, i.e., those tenants the image is shared with.
:param req: the Request object coming from the wsgi layer
:param image_id: The opaque image identifier
:retval The response body is a mapping of the following form::
{'members': [
{'member_id': <MEMBER>,
'can_share': <SHARE_PERMISSION>, ...}, ...
]}
"""
self._enforce(req, 'get_members')
self._raise_404_if_image_deleted(req, image_id)
try:
members = registry.get_image_members(req.context, image_id)
except exception.NotFound:
msg = _("Image with identifier %s not found") % image_id
LOG.debug(msg)
raise webob.exc.HTTPNotFound(msg)
except exception.Forbidden:
msg = _("Unauthorized image access")
LOG.debug(msg)
raise webob.exc.HTTPForbidden(msg)
return dict(members=members)
@utils.mutating
def delete(self, req, image_id, id):
"""
Removes a membership from the image.
"""
self._check_can_access_image_members(req.context)
self._enforce(req, 'delete_member')
self._raise_404_if_image_deleted(req, image_id)
try:
registry.delete_member(req.context, image_id, id)
self._update_store_acls(req, image_id)
except exception.NotFound as e:
msg = "%s" % e
LOG.debug(msg)
raise webob.exc.HTTPNotFound(msg)
except exception.Forbidden as e:
msg = "%s" % e
LOG.debug(msg)
raise webob.exc.HTTPNotFound(msg)
return webob.exc.HTTPNoContent()
    def default(self, req, image_id, id, body=None):
        """This will cover the missing 'show' and 'create' actions"""
        # Any member route without an explicit handler gets HTTP 405.
        raise webob.exc.HTTPMethodNotAllowed()
@utils.mutating
def update(self, req, image_id, id, body=None):
    """
    Adds a membership to the image, or updates an existing one.
    If a body is present, it is a dict with the following format::

        {"member": {
            "can_share": [True|False]
        }}

    If "can_share" is provided, the member's ability to share is
    set accordingly. If it is not provided, existing memberships
    remain unchanged and new memberships default to False.
    """
    self._check_can_access_image_members(req.context)
    self._enforce(req, 'modify_member')
    self._raise_404_if_image_deleted(req, image_id)

    # Figure out can_share (None means "leave existing value alone").
    can_share = None
    if body and 'member' in body and 'can_share' in body['member']:
        can_share = bool(body['member']['can_share'])

    try:
        registry.add_member(req.context, image_id, id, can_share)
        self._update_store_acls(req, image_id)
    except exception.Invalid as e:
        msg = "%s" % e
        LOG.debug(msg)
        raise webob.exc.HTTPBadRequest(explanation=msg)
    except (exception.NotFound, exception.Forbidden) as e:
        # Both map to 404 so unauthorized callers learn nothing.
        msg = "%s" % e
        LOG.debug(msg)
        raise webob.exc.HTTPNotFound(msg)
    return webob.exc.HTTPNoContent()
@utils.mutating
def update_all(self, req, image_id, body):
    """
    Replaces the members of the image with those specified in the
    body. The body is a dict with the following format::

        {"memberships": [
            {"member_id": <MEMBER_ID>,
             ["can_share": [True|False]]}, ...
        ]}
    """
    self._check_can_access_image_members(req.context)
    self._enforce(req, 'modify_member')
    self._raise_404_if_image_deleted(req, image_id)

    try:
        registry.replace_members(req.context, image_id, body)
        self._update_store_acls(req, image_id)
    except exception.Invalid as e:
        msg = "%s" % e
        LOG.debug(msg)
        raise webob.exc.HTTPBadRequest(explanation=msg)
    except (exception.NotFound, exception.Forbidden) as e:
        # Both map to 404 so unauthorized callers learn nothing.
        msg = "%s" % e
        LOG.debug(msg)
        raise webob.exc.HTTPNotFound(msg)
    return webob.exc.HTTPNoContent()
def index_shared_images(self, req, id):
    """
    Retrieves list of image memberships for the given member.

    :param req: the Request object coming from the wsgi layer
    :param id: the opaque member identifier
    :retval The response body is a mapping of the following form::

        {'shared_images': [
            {'image_id': <IMAGE>,
             'can_share': <SHARE_PERMISSION>, ...}, ...
        ]}
    """
    try:
        shared = registry.get_member_images(req.context, id)
    except exception.NotFound as e:
        msg = "%s" % e
        LOG.debug(msg)
        raise webob.exc.HTTPNotFound(msg)
    except exception.Forbidden as e:
        msg = "%s" % e
        LOG.debug(msg)
        raise webob.exc.HTTPForbidden(msg)
    return {'shared_images': shared}
def _update_store_acls(self, req, image_id):
    """Sync backend store ACLs with the image's current visibility/members."""
    image_meta = self.get_image_meta_or_404(req, image_id)
    self.update_store_acls(req, image_id,
                           image_meta.get('location'),
                           image_meta.get('is_public'))
def create_resource():
    """Image members resource factory method"""
    return wsgi.Resource(Controller(),
                         wsgi.JSONRequestDeserializer(),
                         wsgi.JSONResponseSerializer())
|
|
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
import copy
import re
import sys
import gzip
import traceback
import six
import orjson
import jsonschema
from oslo_config import cfg
from six.moves import http_client
from mongoengine.queryset.visitor import Q
from st2api.controllers.base import BaseRestControllerMixin
from st2api.controllers.resource import ResourceController
from st2api.controllers.resource import BaseResourceIsolationControllerMixin
from st2api.controllers.v1.execution_views import ExecutionViewsController
from st2api.controllers.v1.execution_views import SUPPORTED_FILTERS
from st2common import log as logging
from st2common.constants import action as action_constants
from st2common.exceptions import actionrunner as runner_exc
from st2common.exceptions import apivalidation as validation_exc
from st2common.exceptions import param as param_exc
from st2common.exceptions import trace as trace_exc
from st2common.models.api.action import LiveActionAPI
from st2common.models.api.action import LiveActionCreateAPI
from st2common.models.api.base import cast_argument_value
from st2common.models.api.execution import ActionExecutionAPI
from st2common.models.db.auth import UserDB
from st2common.persistence.liveaction import LiveAction
from st2common.persistence.execution import ActionExecution
from st2common.persistence.execution import ActionExecutionOutput
from st2common.router import abort
from st2common.router import Response
from st2common.router import NotFoundException
from st2common.services import action as action_service
from st2common.services import executions as execution_service
from st2common.services import trace as trace_service
from st2common.util import isotime
from st2common.util import action_db as action_utils
from st2common.util import param as param_utils
from st2common.util.jsonify import try_loads
from st2common.rbac.types import PermissionType
from st2common.rbac.backends import get_rbac_backend
# Public API of this module.
__all__ = ["ActionExecutionsController"]

LOG = logging.getLogger(__name__)

# Note: We initialize filters here and not in the constructor
SUPPORTED_EXECUTIONS_FILTERS = copy.deepcopy(SUPPORTED_FILTERS)
SUPPORTED_EXECUTIONS_FILTERS.update(
    {"timestamp_gt": "start_timestamp.gt", "timestamp_lt": "start_timestamp.lt"}
)

# Sleep intervals in seconds; presumably consumed by an execution monitor
# thread defined elsewhere — not referenced in this chunk (TODO confirm).
MONITOR_THREAD_EMPTY_Q_SLEEP_TIME = 5
MONITOR_THREAD_NO_WORKERS_SLEEP_TIME = 1
class ActionExecutionsControllerMixin(BaseRestControllerMixin):
    """
    Mixin class with shared methods.

    Provides the common model/access bindings plus helpers for scheduling
    executions and retrieving results/children, shared by the concrete
    execution controllers below.
    """

    model = ActionExecutionAPI
    access = ActionExecution

    # Those two attributes are mandatory so we can correctly determine and mask secret execution
    # parameters
    mandatory_include_fields_retrieve = [
        "action.parameters",
        "action.output_schema",
        "runner.runner_parameters",
        "runner.output_key",
        "parameters",
        # Attributes below are mandatory for RBAC installations
        "action.pack",
        "action.uid",
        # Required when rbac.permission_isolation is enabled
        "context",
    ]

    # A list of attributes which can be specified using ?exclude_attributes filter
    # NOTE: Allowing user to exclude attribute such as action and runner would break secrets
    # masking
    valid_exclude_attributes = ["result", "trigger_instance", "status"]

    def _handle_schedule_execution(
        self, liveaction_api, requester_user, context_string=None, show_secrets=False
    ):
        """
        Validate the request and RBAC, then schedule the execution,
        translating known failures into HTTP error responses.

        :param liveaction: LiveActionAPI object.
        :type liveaction: :class:`LiveActionAPI`
        """
        # Anonymous requests run as the configured system user.
        if not requester_user:
            requester_user = UserDB(name=cfg.CONF.system_user.user)

        # Assert action ref is valid
        action_ref = liveaction_api.action
        action_db = action_utils.get_action_by_ref(action_ref)
        if not action_db:
            message = 'Action "%s" cannot be found.' % (action_ref)
            LOG.warning(message)
            abort(http_client.BAD_REQUEST, message)

        # Assert the permissions
        permission_type = PermissionType.ACTION_EXECUTE
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_db_permission(
            user_db=requester_user,
            resource_db=action_db,
            permission_type=permission_type,
        )

        # Validate that the authenticated user is admin if user query param is provided
        user = liveaction_api.user or requester_user.name
        rbac_utils.assert_user_is_admin_if_user_query_param_is_provided(
            user_db=requester_user, user=user
        )

        try:
            return self._schedule_execution(
                liveaction=liveaction_api,
                requester_user=requester_user,
                user=user,
                context_string=context_string,
                show_secrets=show_secrets,
                action_db=action_db,
            )
        except ValueError as e:
            LOG.exception("Unable to execute action.")
            abort(http_client.BAD_REQUEST, six.text_type(e))
        except jsonschema.ValidationError as e:
            LOG.exception("Unable to execute action. Parameter validation failed.")
            # Strip Python 2 style u'...' prefixes from the validation message.
            abort(
                http_client.BAD_REQUEST,
                re.sub("u'([^']*)'", r"'\1'", getattr(e, "message", six.text_type(e))),
            )
        except trace_exc.TraceNotFoundException as e:
            abort(http_client.BAD_REQUEST, six.text_type(e))
        except validation_exc.ValueValidationException as e:
            # Re-raised so the router/error-handler layer can format it.
            raise e
        except Exception as e:
            LOG.exception("Unable to execute action. Unexpected error encountered.")
            abort(http_client.INTERNAL_SERVER_ERROR, six.text_type(e))

    def _schedule_execution(
        self,
        liveaction,
        requester_user,
        action_db,
        user=None,
        context_string=None,
        show_secrets=False,
    ):
        """
        Build the liveaction context, render parameters, persist and publish
        the execution request, and return a 201 response with the new
        execution (secrets masked unless permitted).
        """
        # Initialize execution context if it does not exist.
        if not hasattr(liveaction, "context"):
            liveaction.context = dict()

        liveaction.context["user"] = user
        liveaction.context["pack"] = action_db.pack
        LOG.debug("User is: %s" % liveaction.context["user"])

        # Retrieve other st2 context from request header.
        if context_string:
            context = try_loads(context_string)
            if not isinstance(context, dict):
                raise ValueError(
                    "Unable to convert st2-context from the headers into JSON"
                    f" (was {type(context)})."
                )
            liveaction.context.update(context)

        # Include RBAC context (if RBAC is available and enabled)
        if cfg.CONF.rbac.enable:
            user_db = UserDB(name=user)
            rbac_service = get_rbac_backend().get_service_class()
            role_dbs = rbac_service.get_roles_for_user(
                user_db=user_db, include_remote=True
            )
            roles = [role_db.name for role_db in role_dbs]
            liveaction.context["rbac"] = {"user": user, "roles": roles}

        # Schedule the action execution.
        liveaction_db = LiveActionAPI.to_model(liveaction)
        runnertype_db = action_utils.get_runnertype_by_name(
            action_db.runner_type["name"]
        )

        try:
            liveaction_db.parameters = param_utils.render_live_params(
                runnertype_db.runner_parameters,
                action_db.parameters,
                liveaction_db.parameters,
                liveaction_db.context,
            )
        except param_exc.ParamException:
            # We still need to create a request, so liveaction_db is assigned an ID
            liveaction_db, actionexecution_db = action_service.create_request(
                liveaction=liveaction_db,
                action_db=action_db,
                runnertype_db=runnertype_db,
                validate_params=False,
            )
            # By this point the execution is already in the DB therefore need to mark it failed.
            _, e, tb = sys.exc_info()
            action_service.update_status(
                liveaction=liveaction_db,
                new_status=action_constants.LIVEACTION_STATUS_FAILED,
                result={
                    "error": six.text_type(e),
                    "traceback": "".join(traceback.format_tb(tb, 20)),
                },
            )
            # Might be a good idea to return the actual ActionExecution rather than bubble up
            # the exception.
            raise validation_exc.ValueValidationException(six.text_type(e))

        # The request should be created after the above call to render_live_params
        # so any templates in live parameters have a chance to render.
        liveaction_db, actionexecution_db = action_service.create_request(
            liveaction=liveaction_db, action_db=action_db, runnertype_db=runnertype_db
        )
        _, actionexecution_db = action_service.publish_request(
            liveaction_db, actionexecution_db
        )

        mask_secrets = self._get_mask_secrets(requester_user, show_secrets=show_secrets)
        execution_api = ActionExecutionAPI.from_model(
            actionexecution_db, mask_secrets=mask_secrets
        )
        return Response(json=execution_api, status=http_client.CREATED)

    def _get_result_object(self, id):
        """
        Retrieve result object for the provided action execution.

        :param id: Action execution ID.
        :type id: ``str``

        :rtype: ``dict``
        """
        # Projection query: only fetch the (potentially large) result field.
        fields = ["result"]
        action_exec_db = (
            self.access.impl.model.objects.filter(id=id).only(*fields).get()
        )
        return action_exec_db.result

    def _get_children(
        self, id_, requester_user, depth=-1, result_fmt=None, show_secrets=False
    ):
        """
        Return API models for descendant executions of ``id_`` down to
        ``depth`` levels (-1 means unlimited — TODO confirm against
        execution_service.get_descendants), with secrets masked per user.
        """
        # make sure depth is int. Url encoding will make it a string and needs to
        # be converted back in that case.
        depth = int(depth)
        LOG.debug("retrieving children for id: %s with depth: %s", id_, depth)
        descendants = execution_service.get_descendants(
            actionexecution_id=id_, descendant_depth=depth, result_fmt=result_fmt
        )
        mask_secrets = self._get_mask_secrets(requester_user, show_secrets=show_secrets)
        return [
            self.model.from_model(descendant, mask_secrets=mask_secrets)
            for descendant in descendants
        ]
class BaseActionExecutionNestedController(
    ActionExecutionsControllerMixin, ResourceController
):
    """Base class for controllers nested under /executions/<id>/."""

    # Note: We need to override "get_one" and "get_all" to return 404 since nested controller
    # don't implement thos methods
    # ResourceController attributes
    query_options = {}
    supported_filters = {}

    def get_all(self):
        # Listing is not supported on nested controllers.
        abort(http_client.NOT_FOUND)

    def get_one(self, id):
        # Direct retrieval is not supported on nested controllers.
        abort(http_client.NOT_FOUND)
class ActionExecutionChildrenController(BaseActionExecutionNestedController):
    """Handles GET /executions/<id>/children."""

    def get_one(
        self, id, requester_user, depth=-1, result_fmt=None, show_secrets=False
    ):
        """
        Retrieve children for the provided action execution.

        :rtype: ``list``
        """
        if not requester_user:
            requester_user = UserDB(name=cfg.CONF.system_user.user)

        from_model_kwargs = {
            "mask_secrets": self._get_mask_secrets(
                requester_user, show_secrets=show_secrets
            )
        }
        # Fetch the parent first — this also enforces EXECUTION_VIEW RBAC
        # before any children are returned.
        execution_db = self._get_one_by_id(
            id=id,
            requester_user=requester_user,
            from_model_kwargs=from_model_kwargs,
            permission_type=PermissionType.EXECUTION_VIEW,
        )
        id = str(execution_db.id)
        return self._get_children(
            id_=id,
            depth=depth,
            result_fmt=result_fmt,
            requester_user=requester_user,
            show_secrets=show_secrets,
        )
class ActionExecutionAttributeController(BaseActionExecutionNestedController):
    """Handles GET /executions/<id>/attribute/<attribute name>."""

    valid_exclude_attributes = [
        "action__pack",
        "action__uid",
    ] + ActionExecutionsControllerMixin.valid_exclude_attributes

    def get(self, id, attribute, requester_user):
        """
        Retrieve a particular attribute for the provided action execution.

        Handles requests:

            GET /executions/<id>/attribute/<attribute name>

        :rtype: ``dict``
        """
        # pack/uid are always fetched because the RBAC check below needs them.
        fields = [attribute, "action__pack", "action__uid"]
        try:
            fields = self._validate_exclude_fields(fields)
        except ValueError:
            # Re-raise with a user-facing message listing allowed attributes.
            valid_attributes = ", ".join(
                ActionExecutionsControllerMixin.valid_exclude_attributes
            )
            msg = 'Invalid attribute "%s" specified. Valid attributes are: %s' % (
                attribute,
                valid_attributes,
            )
            raise ValueError(msg)

        action_exec_db = (
            self.access.impl.model.objects.filter(id=id).only(*fields).get()
        )

        permission_type = PermissionType.EXECUTION_VIEW
        rbac_utils = get_rbac_backend().get_utils_class()
        rbac_utils.assert_user_has_resource_db_permission(
            user_db=requester_user,
            resource_db=action_exec_db,
            permission_type=permission_type,
        )

        # Missing attribute yields a JSON null rather than a 404.
        result = getattr(action_exec_db, attribute, None)
        return Response(json=result, status=http_client.OK)
class ActionExecutionRawResultController(BaseActionExecutionNestedController):
    """Handles GET /executions/<id>/result — raw (optionally gzipped) result."""

    def get(
        self, id, requester_user, download=False, compress=False, pretty_format=False
    ):
        """
        Retrieve raw action execution result object as a JSON string or optionally force result
        download as a (compressed) file.

        This is primarily to be used in scenarios where executions contain large results and JSON
        loading and parsing it can be slow (e.g. in the st2web) and we just want to display raw
        result.

        :param compress: True to compress the response using gzip (may come handy for executions
                         with large results).
        :param download: True to force downloading result to a file.
        :param pretty_format: True to pretty format returned JSON data - this adds quite some
                              overhead compared to the default behavior where we don't pretty
                              format the result.

        Handles requests:

            GET /executions/<id>/result[?download=1][&compress=1]

        TODO: Maybe we should also support pre-signed URLs for sharing externally with other
        people?
        It of course won't contain all the exection related data, but just sharing the result can
        come handy in many situations.

        :rtype: ``str``
        """
        # NOTE: Here we intentionally use as_pymongo() to avoid mongoengine layer even for old style
        # data
        try:
            result = (
                self.access.impl.model.objects.filter(id=id)
                .only("result")
                .as_pymongo()[0]
            )
        except IndexError:
            raise NotFoundException("Execution with id %s not found" % (id))

        if isinstance(result["result"], dict):
            # For backward compatibility we also support old non JSON field storage format
            if pretty_format:
                response_body = orjson.dumps(
                    result["result"], option=orjson.OPT_INDENT_2
                )
            else:
                response_body = orjson.dumps(result["result"])
        else:
            # For new JSON storage format we just use raw value since it's already JSON serialized
            # string
            response_body = result["result"]
            if pretty_format:
                # Pretty format is not a default behavior since it adds quite some overhead (e.g.
                # 10-30ms for non pretty format for 4 MB json vs ~120 ms for pretty formatted)
                response_body = orjson.dumps(
                    orjson.loads(result["result"]), option=orjson.OPT_INDENT_2
                )
        # NOTE(review): in the non-pretty new-format branch response_body may be a
        # str (raw stored value), while the dict branches produce bytes from
        # orjson.dumps(); gzip.compress() below requires bytes — confirm the new
        # storage format always yields bytes here.

        response = Response()
        response.headers["Content-Type"] = "text/json"

        if download:
            filename = "execution_%s_result.json" % (id)
            if compress:
                filename += ".gz"
            response.headers["Content-Disposition"] = "attachment; filename=%s" % (
                filename
            )

        if compress:
            response.headers["Content-Type"] = "application/x-gzip"
            response.headers["Content-Encoding"] = "gzip"
            response_body = gzip.compress(response_body)

        response.body = response_body
        return response
class ActionExecutionOutputController(
    ActionExecutionsControllerMixin, ResourceController
):
    """Serves stored execution output records as a plain-text response."""

    supported_filters = {"output_type": "output_type"}
    exclude_fields = []

    def get_one(
        self,
        id,
        output_type="all",
        output_format="raw",
        existing_only=False,
        requester_user=None,
        show_secrets=False,
    ):
        """
        Return the accumulated output for an execution as text/plain.

        ``id`` may be the literal string "last" to target the most recent
        execution. NOTE: ``output_format`` and ``existing_only`` are accepted
        but unused in this implementation — presumably kept for API
        compatibility (TODO confirm).
        """
        # Special case for id == "last"
        if id == "last":
            execution_db = ActionExecution.query().order_by("-id").limit(1).first()
            if not execution_db:
                raise ValueError("No executions found in the database")
            id = str(execution_db.id)

        if not requester_user:
            requester_user = UserDB(name=cfg.CONF.system_user.user)

        from_model_kwargs = {
            "mask_secrets": self._get_mask_secrets(
                requester_user, show_secrets=show_secrets
            )
        }
        # Enforces EXECUTION_VIEW RBAC before any output is read.
        execution_db = self._get_one_by_id(
            id=id,
            requester_user=requester_user,
            from_model_kwargs=from_model_kwargs,
            permission_type=PermissionType.EXECUTION_VIEW,
        )
        execution_id = str(execution_db.id)

        query_filters = {}
        if output_type and output_type != "all":
            query_filters["output_type"] = output_type

        def existing_output_iter():
            # Consume and return all of the existing lines
            # pylint: disable=no-member
            output_dbs = ActionExecutionOutput.query(
                execution_id=execution_id, **query_filters
            )
            output = "".join([output_db.data for output_db in output_dbs])
            yield six.binary_type(output.encode("utf-8"))

        def make_response():
            app_iter = existing_output_iter()
            res = Response(content_type="text/plain", app_iter=app_iter)
            return res

        res = make_response()
        return res
class ActionExecutionReRunController(
    ActionExecutionsControllerMixin, ResourceController
):
    """Handles POST /executions/<id>/re_run."""

    supported_filters = {}
    exclude_fields = ["result", "trigger_instance"]

    class ExecutionSpecificationAPI(object):
        """Request body for a re-run: parameter overrides and/or task lists."""

        def __init__(self, parameters=None, tasks=None, reset=None, user=None):
            self.parameters = parameters or {}
            self.tasks = tasks or []
            self.reset = reset or []
            self.user = user

        def validate(self):
            # NOTE(review): post() below repeats these same checks inline
            # instead of calling validate() — presumably because the router
            # hands post() a different spec object; confirm before unifying.
            if (self.tasks or self.reset) and self.parameters:
                raise ValueError(
                    "Parameters override is not supported when "
                    "re-running task(s) for a workflow."
                )

            if self.parameters:
                if not isinstance(self.parameters, dict):
                    raise TypeError(
                        f"The parameters needs to be a dictionary (was {type(self.parameters)})."
                    )

            if self.tasks:
                if not isinstance(self.tasks, list):
                    raise TypeError(
                        f"The tasks needs to be a list (was {type(self.tasks)})."
                    )

            if self.reset:
                if not isinstance(self.reset, list):
                    raise TypeError(
                        f"The reset needs to be a list (was {type(self.reset)})."
                    )

            if list(set(self.reset) - set(self.tasks)):
                raise ValueError(
                    "List of tasks to reset does not match the tasks to rerun."
                )

            return self

    def post(self, spec_api, id, requester_user, no_merge=False, show_secrets=False):
        """
        Re-run the provided action execution optionally specifying override parameters.

        Handles requests:

            POST /executions/<id>/re_run
        """
        # Inline validation of the spec (mirrors ExecutionSpecificationAPI.validate).
        if (spec_api.tasks or spec_api.reset) and spec_api.parameters:
            raise ValueError(
                "Parameters override is not supported when "
                "re-running task(s) for a workflow."
            )

        if spec_api.parameters:
            if not isinstance(spec_api.parameters, dict):
                raise TypeError(
                    f"The parameters needs to be a dictionary (was {type(spec_api.parameters)})."
                )

        if spec_api.tasks:
            if not isinstance(spec_api.tasks, list):
                raise TypeError(
                    f"The tasks needs to be a list (was {type(spec_api.tasks)})."
                )

        if spec_api.reset:
            if not isinstance(spec_api.reset, list):
                raise TypeError(
                    f"The reset needs to be a list (was {type(spec_api.reset)})."
                )

        if list(set(spec_api.reset) - set(spec_api.tasks)):
            raise ValueError(
                "List of tasks to reset does not match the tasks to rerun."
            )

        # Optional scheduling delay (seconds — TODO confirm unit).
        delay = None
        if hasattr(spec_api, "delay") and isinstance(spec_api.delay, int):
            delay = spec_api.delay

        no_merge = cast_argument_value(value_type=bool, value=no_merge)
        existing_execution = self._get_one_by_id(
            id=id,
            exclude_fields=self.exclude_fields,
            requester_user=requester_user,
            permission_type=PermissionType.EXECUTION_VIEW,
        )

        if spec_api.tasks and existing_execution.runner["name"] != "orquesta":
            raise ValueError("Task option is only supported for Orquesta workflows.")

        # Merge in any parameters provided by the user
        new_parameters = {}
        if not no_merge:
            new_parameters.update(getattr(existing_execution, "parameters", {}))
        new_parameters.update(spec_api.parameters)

        # Create object for the new execution
        action_ref = existing_execution.action["ref"]

        # Include additional option(s) for the execution
        context = {
            "re-run": {
                "ref": id,
            }
        }

        if spec_api.tasks:
            context["re-run"]["tasks"] = spec_api.tasks

        if spec_api.reset:
            context["re-run"]["reset"] = spec_api.reset

        # Add trace to the new execution
        trace = trace_service.get_trace_db_by_action_execution(
            action_execution_id=existing_execution.id
        )
        if trace:
            context["trace_context"] = {"id_": str(trace.id)}

        new_liveaction_api = LiveActionCreateAPI(
            action=action_ref,
            context=context,
            parameters=new_parameters,
            user=spec_api.user,
            delay=delay,
        )

        return self._handle_schedule_execution(
            liveaction_api=new_liveaction_api,
            requester_user=requester_user,
            show_secrets=show_secrets,
        )
class ActionExecutionsController(
    BaseResourceIsolationControllerMixin,
    ActionExecutionsControllerMixin,
    ResourceController,
):
    """
    Implements the RESTful web endpoint that handles
    the lifecycle of ActionExecutions in the system.
    """

    # Nested controllers
    views = ExecutionViewsController()
    children = ActionExecutionChildrenController()
    attribute = ActionExecutionAttributeController()
    re_run = ActionExecutionReRunController()

    # ResourceController attributes
    # Default sort: newest executions first, ties broken by action ref.
    query_options = {"sort": ["-start_timestamp", "action.ref"]}
    supported_filters = SUPPORTED_EXECUTIONS_FILTERS
    # Timestamp filters arrive as ISO-8601 strings and are parsed to datetimes.
    filter_transform_functions = {
        "timestamp_gt": lambda value: isotime.parse(value=value),
        "timestamp_lt": lambda value: isotime.parse(value=value),
    }
def get_all(
    self,
    requester_user,
    exclude_attributes=None,
    sort=None,
    offset=0,
    limit=None,
    show_secrets=False,
    include_attributes=None,
    advanced_filters=None,
    **raw_filters,
):
    """
    List all executions.

    Handles requests:

        GET /executions[?exclude_attributes=result,trigger_instance]

    :param exclude_attributes: List of attributes to exclude from the object.
    :type exclude_attributes: ``list``
    """
    # When filtering on a timestamp, pick a sort direction matching the
    # filter so the user gets results in the order they expect.
    custom_query_options = None
    if raw_filters.get("timestamp_lt") or raw_filters.get("sort_desc"):
        custom_query_options = {"sort": ["-start_timestamp", "action.ref"]}
    elif raw_filters.get("timestamp_gt") or raw_filters.get("sort_asc"):
        custom_query_options = {"sort": ["+start_timestamp", "action.ref"]}

    model_kwargs = {
        "mask_secrets": self._get_mask_secrets(
            requester_user, show_secrets=show_secrets
        )
    }

    return self._get_action_executions(
        exclude_fields=exclude_attributes,
        include_fields=include_attributes,
        from_model_kwargs=model_kwargs,
        sort=sort,
        offset=offset,
        limit=limit,
        query_options=custom_query_options,
        raw_filters=raw_filters,
        advanced_filters=advanced_filters,
        requester_user=requester_user,
    )
def get_one(
    self,
    id,
    requester_user,
    exclude_attributes=None,
    include_attributes=None,
    show_secrets=False,
    max_result_size=None,
):
    """
    Retrieve a single execution.

    Handles requests:

        GET /executions/<id>[?exclude_attributes=result,trigger_instance]

    :param exclude_attributes: List of attributes to exclude from the object.
    :type exclude_attributes: ``list``
    """
    # Validate all query parameters up front.
    exclude_fields = self._validate_exclude_fields(
        exclude_fields=exclude_attributes
    )
    include_fields = self._validate_include_fields(
        include_fields=include_attributes
    )
    max_result_size = self._validate_max_result_size(
        max_result_size=max_result_size
    )

    model_kwargs = {
        "mask_secrets": self._get_mask_secrets(
            requester_user, show_secrets=show_secrets
        )
    }

    # Special case: "last" resolves to the most recently inserted execution.
    if id == "last":
        latest_db = (
            ActionExecution.query().order_by("-id").limit(1).only("id").first()
        )
        if not latest_db:
            raise ValueError("No executions found in the database")
        id = str(latest_db.id)

    return self._get_one_by_id(
        id=id,
        exclude_fields=exclude_fields,
        include_fields=include_fields,
        requester_user=requester_user,
        from_model_kwargs=model_kwargs,
        permission_type=PermissionType.EXECUTION_VIEW,
        get_by_id_kwargs={"max_result_size": max_result_size},
    )
def post(
    self, liveaction_api, requester_user, context_string=None, show_secrets=False
):
    """Schedule a new execution. Handles POST /executions."""
    # Thin pass-through; all validation/RBAC lives in the mixin helper.
    return self._handle_schedule_execution(
        liveaction_api=liveaction_api,
        requester_user=requester_user,
        context_string=context_string,
        show_secrets=show_secrets,
    )
def put(self, id, liveaction_api, requester_user, show_secrets=False):
    """
    Updates a single execution.

    Handles requests:

        PUT /executions/<id>

    Supports status transitions (cancel / pause / resume) plus direct status
    updates; rejects updates on executions already in a completed state.
    """
    if not requester_user:
        requester_user = UserDB(name=cfg.CONF.system_user.user)

    from_model_kwargs = {
        "mask_secrets": self._get_mask_secrets(
            requester_user, show_secrets=show_secrets
        )
    }
    execution_api = self._get_one_by_id(
        id=id,
        requester_user=requester_user,
        from_model_kwargs=from_model_kwargs,
        permission_type=PermissionType.EXECUTION_STOP,
    )

    if not execution_api:
        abort(http_client.NOT_FOUND, "Execution with id %s not found." % id)

    liveaction_id = execution_api.liveaction["id"]
    if not liveaction_id:
        abort(
            http_client.INTERNAL_SERVER_ERROR,
            "Execution object missing link to liveaction %s." % liveaction_id,
        )

    try:
        liveaction_db = LiveAction.get_by_id(liveaction_id)
    except Exception:
        # BUGFIX: was a bare "except:" which also swallowed SystemExit and
        # KeyboardInterrupt; only unexpected lookup failures should 500 here.
        abort(
            http_client.INTERNAL_SERVER_ERROR,
            "Execution object missing link to liveaction %s." % liveaction_id,
        )

    if liveaction_db.status in action_constants.LIVEACTION_COMPLETED_STATES:
        abort(http_client.BAD_REQUEST, "Execution is already in completed state.")

    def update_status(liveaction_api, liveaction_db):
        # Apply the requested status/result to the liveaction and return the
        # refreshed (liveaction, execution) pair.
        status = liveaction_api.status
        result = getattr(liveaction_api, "result", None)
        liveaction_db = action_service.update_status(
            liveaction_db, status, result, set_result_size=True
        )
        actionexecution_db = ActionExecution.get(
            liveaction__id=str(liveaction_db.id)
        )
        return (liveaction_db, actionexecution_db)

    try:
        if (
            liveaction_db.status == action_constants.LIVEACTION_STATUS_CANCELING
            and liveaction_api.status == action_constants.LIVEACTION_STATUS_CANCELED
        ):
            # Keep the parent in "canceling" while children are still active.
            if action_service.is_children_active(liveaction_id):
                liveaction_api.status = action_constants.LIVEACTION_STATUS_CANCELING
            liveaction_db, actionexecution_db = update_status(
                liveaction_api, liveaction_db
            )
        elif (
            liveaction_api.status == action_constants.LIVEACTION_STATUS_CANCELING
            or liveaction_api.status == action_constants.LIVEACTION_STATUS_CANCELED
        ):
            liveaction_db, actionexecution_db = action_service.request_cancellation(
                liveaction_db, requester_user.name or cfg.CONF.system_user.user
            )
        elif (
            liveaction_db.status == action_constants.LIVEACTION_STATUS_PAUSING
            and liveaction_api.status == action_constants.LIVEACTION_STATUS_PAUSED
        ):
            # Same guard for pausing: wait for children before marking paused.
            if action_service.is_children_active(liveaction_id):
                liveaction_api.status = action_constants.LIVEACTION_STATUS_PAUSING
            liveaction_db, actionexecution_db = update_status(
                liveaction_api, liveaction_db
            )
        elif (
            liveaction_api.status == action_constants.LIVEACTION_STATUS_PAUSING
            or liveaction_api.status == action_constants.LIVEACTION_STATUS_PAUSED
        ):
            liveaction_db, actionexecution_db = action_service.request_pause(
                liveaction_db, requester_user.name or cfg.CONF.system_user.user
            )
        elif liveaction_api.status == action_constants.LIVEACTION_STATUS_RESUMING:
            liveaction_db, actionexecution_db = action_service.request_resume(
                liveaction_db, requester_user.name or cfg.CONF.system_user.user
            )
        else:
            liveaction_db, actionexecution_db = update_status(
                liveaction_api, liveaction_db
            )
    except runner_exc.InvalidActionRunnerOperationError as e:
        LOG.exception(
            "Failed updating liveaction %s. %s", liveaction_db.id, six.text_type(e)
        )
        abort(
            http_client.BAD_REQUEST,
            "Failed updating execution. %s" % six.text_type(e),
        )
    except runner_exc.UnexpectedActionExecutionStatusError as e:
        LOG.exception(
            "Failed updating liveaction %s. %s", liveaction_db.id, six.text_type(e)
        )
        abort(
            http_client.BAD_REQUEST,
            "Failed updating execution. %s" % six.text_type(e),
        )
    except Exception as e:
        LOG.exception(
            "Failed updating liveaction %s. %s", liveaction_db.id, six.text_type(e)
        )
        abort(
            http_client.INTERNAL_SERVER_ERROR,
            "Failed updating execution due to unexpected error.",
        )

    mask_secrets = self._get_mask_secrets(requester_user, show_secrets=show_secrets)
    execution_api = ActionExecutionAPI.from_model(
        actionexecution_db, mask_secrets=mask_secrets
    )
    return execution_api
def delete(self, id, requester_user, show_secrets=False):
    """
    Stops a single execution.

    Handles requests:

        DELETE /executions/<id>

    Requests cancellation for the execution's liveaction; already-canceled
    executions are returned as-is, non-cancelable states return 200 with a
    message.
    """
    if not requester_user:
        requester_user = UserDB(name=cfg.CONF.system_user.user)

    from_model_kwargs = {
        "mask_secrets": self._get_mask_secrets(
            requester_user, show_secrets=show_secrets
        )
    }
    execution_api = self._get_one_by_id(
        id=id,
        requester_user=requester_user,
        from_model_kwargs=from_model_kwargs,
        permission_type=PermissionType.EXECUTION_STOP,
    )

    if not execution_api:
        abort(http_client.NOT_FOUND, "Execution with id %s not found." % id)

    liveaction_id = execution_api.liveaction["id"]
    if not liveaction_id:
        abort(
            http_client.INTERNAL_SERVER_ERROR,
            "Execution object missing link to liveaction %s." % liveaction_id,
        )

    try:
        liveaction_db = LiveAction.get_by_id(liveaction_id)
    except Exception:
        # BUGFIX: was a bare "except:" which also swallowed SystemExit and
        # KeyboardInterrupt; only unexpected lookup failures should 500 here.
        abort(
            http_client.INTERNAL_SERVER_ERROR,
            "Execution object missing link to liveaction %s." % liveaction_id,
        )

    if liveaction_db.status == action_constants.LIVEACTION_STATUS_CANCELED:
        LOG.info(
            'Action %s already in "canceled" state; \
        returning execution object.'
            % liveaction_db.id
        )
        return execution_api

    if liveaction_db.status not in action_constants.LIVEACTION_CANCELABLE_STATES:
        abort(
            http_client.OK,
            "Action cannot be canceled. State = %s." % liveaction_db.status,
        )

    try:
        (liveaction_db, execution_db) = action_service.request_cancellation(
            liveaction_db, requester_user.name or cfg.CONF.system_user.user
        )
    except Exception:
        # BUGFIX: narrowed from bare "except:" — log and translate to a 500
        # instead of swallowing interpreter-exit signals.
        LOG.exception(
            "Failed requesting cancellation for liveaction %s.", liveaction_db.id
        )
        abort(http_client.INTERNAL_SERVER_ERROR, "Failed canceling execution.")

    return ActionExecutionAPI.from_model(
        execution_db, mask_secrets=from_model_kwargs["mask_secrets"]
    )
def _validate_max_result_size(
self, max_result_size: Optional[int]
) -> Optional[int]:
"""
Validate value of the ?max_result_size query parameter (if provided).
"""
# Maximum limit for MongoDB collection document is 16 MB and the field itself can't be
# larger than that obviously. And in reality due to the other fields, overhead, etc,
# 14 is the upper limit.
if not max_result_size:
return max_result_size
if max_result_size <= 0:
raise ValueError("max_result_size must be a positive number")
if max_result_size > 14 * 1024 * 1024:
raise ValueError(
"max_result_size query parameter must be smaller than 14 MB"
)
return max_result_size
    def _get_by_id(
        self,
        resource_id,
        exclude_fields=None,
        include_fields=None,
        max_result_size=None,
    ):
        """
        Custom version of _get_by_id() which supports ?max_result_size pre-filtering and not
        returning result field for executions which result size exceeds this threshold.
        This functionality allows us to implement fast and efficient retrievals in st2web.

        :param resource_id: ID of the execution to retrieve.
        :param exclude_fields: Optional list of field names to exclude from the result.
            NOTE: the list may be mutated in place (``result`` appended) below.
        :param include_fields: Optional list of field names to include in the result.
            NOTE: the list may be mutated in place (``result`` removed) below.
        :param max_result_size: Optional result-size threshold in bytes; when the
            stored ``result_size`` exceeds it, the ``result`` field is not returned.
        """
        exclude_fields = exclude_fields or []
        include_fields = include_fields or []
        if not max_result_size:
            # If max_result_size is not provided we don't perform any prefiltering and directly
            # call parent method
            execution_db = super(ActionExecutionsController, self)._get_by_id(
                resource_id=resource_id,
                exclude_fields=exclude_fields,
                include_fields=include_fields,
            )
            return execution_db
        # Special query where we check if result size is smaller than pre-defined or that field
        # doesn't not exist (old executions) and only return the result if the condition is met.
        # This allows us to implement fast and efficient retrievals of executions on the client
        # st2web side where we don't want to retrieve and display result directly for executions
        # with large results
        # Keep in mind that the query itself is very fast and adds almost no overhead for API
        # operations which pass this query parameter because we first filter on the ID (indexed
        # field) and perform projection query with two tiny fields (based on real life testing it
        # takes less than 3 ms in most scenarios).
        execution_db = self.access.get(
            Q(id=resource_id)
            & (Q(result_size__lte=max_result_size) | Q(result_size__not__exists=True)),
            only_fields=["id", "result_size"],
        )
        # if result is empty, this means that execution either doesn't exist or the result is
        # larger than threshold which means we don't want to retrieve and return result to
        # the end user to we set exclude_fields accordingly
        if not execution_db:
            LOG.debug(
                "Execution with id %s and result_size < %s not found. This means "
                "execution with this ID doesn't exist or result_size exceeds the "
                "threshold. Result field will be excluded from the retrieval and "
                "the response." % (resource_id, max_result_size)
            )
            # Prefer trimming include_fields when the caller asked for specific
            # fields; otherwise fall back to excluding "result" explicitly.
            if include_fields and "result" in include_fields:
                include_fields.remove("result")
            elif not include_fields:
                exclude_fields += ["result"]
        # Now call parent get by id with potentially modified include / exclude fields in case
        # result should not be included
        execution_db = super(ActionExecutionsController, self)._get_by_id(
            resource_id=resource_id,
            exclude_fields=exclude_fields,
            include_fields=include_fields,
        )
        return execution_db
def _get_action_executions(
self,
exclude_fields=None,
include_fields=None,
sort=None,
offset=0,
limit=None,
advanced_filters=None,
query_options=None,
raw_filters=None,
from_model_kwargs=None,
requester_user=None,
):
"""
:param exclude_fields: A list of object fields to exclude.
:type exclude_fields: ``list``
"""
if limit is None:
limit = self.default_limit
limit = int(limit)
LOG.debug(
"Retrieving all action executions with filters=%s,exclude_fields=%s,"
"include_fields=%s",
raw_filters,
exclude_fields,
include_fields,
)
return super(ActionExecutionsController, self)._get_all(
exclude_fields=exclude_fields,
include_fields=include_fields,
from_model_kwargs=from_model_kwargs,
sort=sort,
offset=offset,
limit=limit,
query_options=query_options,
raw_filters=raw_filters,
advanced_filters=advanced_filters,
requester_user=requester_user,
)
# Module-level singleton controller instances; the API router wires these
# up to the corresponding HTTP endpoints.
action_executions_controller = ActionExecutionsController()
action_execution_output_controller = ActionExecutionOutputController()
action_execution_rerun_controller = ActionExecutionReRunController()
action_execution_attribute_controller = ActionExecutionAttributeController()
action_execution_children_controller = ActionExecutionChildrenController()
action_execution_raw_result_controller = ActionExecutionRawResultController()
|
|
import os
import os.path as op
import warnings
import gc
from nose.tools import assert_true, assert_raises
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution, SourceEstimate, pick_types_forward,
read_evokeds)
from mne.tests.common import assert_naming
from mne.label import read_label
from mne.utils import (requires_mne, run_subprocess, _TempDir,
run_tests_if_main, slow_test)
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward)
# Root of the mne-testing-data set; empty when the data are not downloaded,
# in which case @testing.requires_testing_data-decorated tests are skipped.
data_path = testing.data_path(download=False)
# Forward solutions (with and without gradient) used throughout these tests.
fname_meeg = op.join(data_path, 'MEG', 'sample',
                     'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
                          'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
# Small raw/evoked fixtures shipped with the package itself (no download).
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
                    'test_raw.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
                       'data', 'test-ave.fif')
# NOTE(review): despite the name, this points at the -trans.fif (MRI<->head
# transform) file — confirm against usage before renaming.
fname_mri = op.join(data_path, 'MEG', 'sample',
                    'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
def compare_forwards(f1, f2):
    """Compare two (potentially converted) forward solutions.

    Raises an ``AssertionError`` if the solutions' leadfields, gradient
    solutions, source normals, or orientation flags differ.

    Parameters
    ----------
    f1, f2 : dict-like
        Forward solutions with 'sol', 'sol_grad', 'source_nn',
        'source_ori' and 'surf_ori' entries.
    """
    assert_allclose(f1['sol']['data'], f2['sol']['data'])
    assert_equal(f1['sol']['ncol'], f2['sol']['ncol'])
    assert_allclose(f1['source_nn'], f2['source_nn'])
    if f1['sol_grad'] is not None:
        # Either both or neither must carry a gradient solution.
        # NOTE: plain asserts here instead of deprecated nose.tools helpers.
        assert f2['sol_grad'] is not None
        assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
        assert_equal(f1['sol_grad']['ncol'], f2['sol_grad']['ncol'])
    else:
        assert f2['sol_grad'] is None
    assert_equal(f1['source_ori'], f2['source_ori'])
    assert_equal(f1['surf_ori'], f2['surf_ori'])
@testing.requires_testing_data
def test_convert_forward():
    """Test converting forward solution between different representations
    (free orientation <-> surface orientation <-> fixed orientation),
    checking each conversion round-trips against the on-disk version.
    """
    fwd = read_forward_solution(fname_meeg_grad)
    assert_true(repr(fwd))
    assert_true(isinstance(fwd, Forward))
    # look at surface orientation
    fwd_surf = convert_forward_solution(fwd, surf_ori=True)
    fwd_surf_io = read_forward_solution(fname_meeg_grad, surf_ori=True)
    compare_forwards(fwd_surf, fwd_surf_io)
    # free the IO copy eagerly — presumably to keep peak memory low on
    # test machines (TODO confirm)
    del fwd_surf_io
    gc.collect()
    # go back
    fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
    assert_true(repr(fwd_new))
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
    # now go to fixed
    fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=False,
                                         force_fixed=True)
    del fwd_surf
    gc.collect()
    assert_true(repr(fwd_fixed))
    assert_true(isinstance(fwd_fixed, Forward))
    fwd_fixed_io = read_forward_solution(fname_meeg_grad, surf_ori=False,
                                         force_fixed=True)
    compare_forwards(fwd_fixed, fwd_fixed_io)
    del fwd_fixed_io
    gc.collect()
    # now go back to cartesian (original condition)
    fwd_new = convert_forward_solution(fwd_fixed)
    assert_true(repr(fwd_new))
    assert_true(isinstance(fwd_new, Forward))
    compare_forwards(fwd, fwd_new)
    del fwd, fwd_new, fwd_fixed
    gc.collect()
@slow_test
@testing.requires_testing_data
def test_io_forward():
    """Test IO for forward solutions: round-trip write/read, surface- and
    fixed-orientation variants, and the bad-filename naming warning.
    """
    temp_dir = _TempDir()
    # do extensive tests with MEEG + grad
    n_channels, n_src = 366, 108
    fwd = read_forward_solution(fname_meeg_grad)
    assert_true(isinstance(fwd, Forward))
    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
    leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    # round-trip: write, re-read, and compare against a fresh read
    fname_temp = op.join(temp_dir, 'test-fwd.fif')
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd = read_forward_solution(fname_meeg_grad, surf_ori=True)
    fwd_read = read_forward_solution(fname_temp, surf_ori=True)
    leadfield = fwd_read['sol']['data']
    assert_equal(leadfield.shape, (n_channels, n_src))
    assert_equal(len(fwd_read['sol']['row_names']), n_channels)
    assert_equal(len(fwd_read['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd_read['info'])
    assert_true('mri_head_t' in fwd_read)
    assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
    fwd = read_forward_solution(fname_meeg_grad, force_fixed=True)
    leadfield = fwd['sol']['data']
    # fixed orientation collapses the 3 per-source components into one;
    # use floor division so the expected dimension stays an int under
    # Python 3 (n_src / 3 would be the float 36.0)
    assert_equal(leadfield.shape, (n_channels, n_src // 3))
    assert_equal(len(fwd['sol']['row_names']), n_channels)
    assert_equal(len(fwd['info']['chs']), n_channels)
    assert_true('dev_head_t' in fwd['info'])
    assert_true('mri_head_t' in fwd)
    assert_true(fwd['surf_ori'])
    # test warnings on bad filenames
    fwd = read_forward_solution(fname_meeg_grad)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        fwd_badname = op.join(temp_dir, 'test-bad-name.fif.gz')
        write_forward_solution(fwd_badname, fwd)
        read_forward_solution(fwd_badname)
    # one naming warning each from write and read
    assert_naming(w, 'test_forward.py', 2)
    fwd = read_forward_solution(fname_meeg)
    write_forward_solution(fname_temp, fwd, overwrite=True)
    fwd_read = read_forward_solution(fname_temp)
    compare_forwards(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
    """Test projection of source space data to sensor space
    for both Evoked and Raw outputs, using a constant-amplitude STC so the
    projected sensor data can be checked against the leadfield column sums.
    """
    start = 0
    stop = 5
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    assert_true(isinstance(fwd, Forward))
    vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
    # all-ones source estimate: projected data should sum to the gain sums
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    gain_sum = np.sum(fwd['sol']['data'], axis=1)
    # Evoked
    with warnings.catch_warnings(record=True) as w:
        evoked = read_evokeds(fname_evoked, condition=0)
        evoked.pick_types(meg=True)
        evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
        # exactly two warnings expected from the calls above
        assert_equal(len(w), 2)
        data = evoked.data
        times = evoked.times
        # do some tests
        assert_array_almost_equal(evoked.info['sfreq'], sfreq)
        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
        assert_array_almost_equal(times[0], t_start)
        assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
        # Raw
        raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start,
                                     stop=stop)
        data, times = raw_proj[:, :]
        # do some tests
        assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
        assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
        # raw timestamps are quantised to samples, so allow one sample slop
        atol = 1. / sfreq
        assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
        assert_allclose(raw_proj.last_samp / sfreq,
                        t_start + (n_times - 1) / sfreq, atol=atol)
@testing.requires_testing_data
def test_restrict_forward_to_stc():
    """Test restriction of source space to source SourceEstimate
    vertices, for both fixed- and free-orientation forward solutions.
    """
    start = 0
    stop = 5
    n_times = stop - start - 1
    sfreq = 10.0
    t_start = 0.123
    # fixed orientation: one leadfield column per selected vertex
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    # pick 15 left-hemisphere and 5 right-hemisphere vertices
    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    fwd_out = restrict_forward_to_stc(fwd, stc)
    assert_true(isinstance(fwd_out, Forward))
    assert_equal(fwd_out['sol']['ncol'], 20)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
    # free orientation: three leadfield columns per selected vertex
    fwd = read_forward_solution(fname_meeg, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)
    vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
    stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
    stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
    fwd_out = restrict_forward_to_stc(fwd, stc)
    assert_equal(fwd_out['sol']['ncol'], 60)
    assert_equal(fwd_out['src'][0]['nuse'], 15)
    assert_equal(fwd_out['src'][1]['nuse'], 5)
    assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
    assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
@testing.requires_testing_data
def test_restrict_forward_to_label():
    """Test restriction of source space to label
    vertices, for both fixed- and free-orientation forward solutions.
    """
    # fixed orientation: one leadfield column per selected vertex
    fwd = read_forward_solution(fname_meeg, force_fixed=True)
    fwd = pick_types_forward(fwd, meg=True)
    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
    # expected selection: label vertices present in the source space,
    # expressed as indices into each hemisphere's vertno
    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]
    nuse_lh = fwd['src'][0]['nuse']
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
    vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
    # right-hemisphere indices are offset by the LH source count
    src_sel_rh += nuse_lh
    assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
    assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)
    # free orientation: three leadfield columns per selected vertex
    fwd = read_forward_solution(fname_meeg, force_fixed=False)
    fwd = pick_types_forward(fwd, meg=True)
    label_path = op.join(data_path, 'MEG', 'sample', 'labels')
    labels = ['Aud-lh', 'Vis-rh']
    label_lh = read_label(op.join(label_path, labels[0] + '.label'))
    label_rh = read_label(op.join(label_path, labels[1] + '.label'))
    fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
    src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
    src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
    vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]
    nuse_lh = fwd['src'][0]['nuse']
    src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
    src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
    vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
    src_sel_rh += nuse_lh
    assert_equal(fwd_out['sol']['ncol'],
                 3 * (len(src_sel_lh) + len(src_sel_rh)))
    assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
    assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
    assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
    assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution():
    """Test averaging forward solutions
    including argument validation, weighted averaging, and a comparison
    run through the mne_average_forward_solutions command-line tool.
    """
    temp_dir = _TempDir()
    fwd = read_forward_solution(fname_meeg)
    # input not a list
    assert_raises(TypeError, average_forward_solutions, 1)
    # list is too short
    assert_raises(ValueError, average_forward_solutions, [])
    # negative weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
    # all zero weights
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
    # weights not same length
    assert_raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
    # list does not only have all dict()
    assert_raises(TypeError, average_forward_solutions, [1, fwd])
    # try an easy case: averaging a single solution is an identity copy
    fwd_copy = average_forward_solutions([fwd])
    assert_true(isinstance(fwd_copy, Forward))
    assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
    # modify a fwd solution, save it, use MNE to average with old one
    fwd_copy['sol']['data'] *= 0.5
    fname_copy = op.join(temp_dir, 'copy-fwd.fif')
    write_forward_solution(fname_copy, fwd_copy, overwrite=True)
    cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
           fname_copy, '--out', fname_copy)
    run_subprocess(cmd)
    # now let's actually do it, with one filename and one fwd
    # mean of (1.0x, 0.5x) data -> 0.75x
    fwd_ave = average_forward_solutions([fwd, fwd_copy])
    assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
    # fwd_ave_mne = read_forward_solution(fname_copy)
    # assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
    # with gradient: averaging a solution with itself must be a no-op
    fwd = read_forward_solution(fname_meeg_grad)
    fwd_ave = average_forward_solutions([fwd, fwd])
    compare_forwards(fwd, fwd_ave)
# Execute the tests in this module when run as a script.
run_tests_if_main()
|
|
# pylint: disable=too-many-instance-attributes,attribute-defined-outside-init
"""
account.py
==========
Account related functions.
"""
from __future__ import absolute_import
import inspect
import re
import sys
import time
from PyQt4 import QtGui
import queues
from addresses import decodeAddress
from bmconfigparser import BMConfigParser
from helper_ackPayload import genAckPayload
from helper_sql import sqlQuery, sqlExecute
from .foldertree import AccountMixin
from .utils import str_broadcast_subscribers
def getSortedAccounts():
    """Get a list of address config sections, sorted case-insensitively
    by each section's UTF-8 decoded ``label``.

    :returns: the sorted list of section names
    """
    configSections = BMConfigParser().addresses()
    # Sort with key= rather than cmp=: the label is decoded and lower-cased
    # once per section instead of twice per comparison, and cmp= does not
    # exist on Python 3. Ordering is identical (same comparison value,
    # stable sort).
    configSections.sort(
        key=lambda account: unicode(
            BMConfigParser().get(account, 'label'), 'utf-8').lower()
    )
    return configSections
def getSortedSubscriptions(count=False):
    """
    Actually return a grouped dictionary rather than a sorted list

    :param count: Whether to count messages for each fromaddress in the inbox
    :type count: bool, default False
    :returns: dict keys are addresses, values are dicts keyed by folder name
        ('inbox' plus any folder with unread messages), each containing
        'label', 'enabled' and (for 'inbox') 'count' settings
    :rtype: dict, default {}
    """
    queryreturn = sqlQuery('SELECT label, address, enabled FROM subscriptions ORDER BY label COLLATE NOCASE ASC')
    ret = {}
    # Seed every subscription with an 'inbox' entry and a zero count.
    for row in queryreturn:
        label, address, enabled = row
        ret[address] = {}
        ret[address]["inbox"] = {}
        ret[address]["inbox"]['label'] = label
        ret[address]["inbox"]['enabled'] = enabled
        ret[address]["inbox"]['count'] = 0
    if count:
        # Count unread broadcast messages per sender and folder; the join
        # limits the count to addresses we are actually subscribed to.
        queryreturn = sqlQuery('''SELECT fromaddress, folder, count(msgid) as cnt
            FROM inbox, subscriptions ON subscriptions.address = inbox.fromaddress
            WHERE read = 0 AND toaddress = ?
            GROUP BY inbox.fromaddress, folder''', str_broadcast_subscribers)
        for row in queryreturn:
            address, folder, cnt = row
            # Folders other than 'inbox' inherit label/enabled from 'inbox'.
            if folder not in ret[address]:
                ret[address][folder] = {
                    'label': ret[address]['inbox']['label'],
                    'enabled': ret[address]['inbox']['enabled']
                }
            ret[address][folder]['count'] = cnt
    return ret
def accountClass(address):
    """Return a BMAccount (or subclass) instance appropriate for ``address``.

    Addresses without a config section are treated as broadcast or
    subscription accounts; configured addresses with a ``gateway`` option
    get the matching GatewayAccount subclass; everything else gets a
    plain BMAccount. May return ``None`` for an invalid broadcast entry
    and NoAccount for e.g. a deleted chan.
    """
    if not BMConfigParser().has_section(address):
        # .. todo:: This BROADCAST section makes no sense
        if address == str_broadcast_subscribers:
            subscription = BroadcastAccount(address)
            if subscription.type != AccountMixin.BROADCAST:
                return None
        else:
            subscription = SubscriptionAccount(address)
            if subscription.type != AccountMixin.SUBSCRIPTION:
                # e.g. deleted chan
                return NoAccount(address)
        return subscription
    try:
        gateway = BMConfigParser().get(address, "gateway")
        # Find a GatewayAccount subclass in this module whose gatewayName
        # matches the configured gateway value.
        for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
            if issubclass(cls, GatewayAccount) and cls.gatewayName == gateway:
                return cls(address)
        # general gateway
        return GatewayAccount(address)
    except Exception:
        # The "gateway" option is absent or unreadable, so this is not a
        # gateway account. Narrowed from a bare except so SystemExit and
        # KeyboardInterrupt still propagate.
        pass
    # no gateway
    return BMAccount(address)
class AccountColor(AccountMixin):  # pylint: disable=too-few-public-methods
    """Set the type of account"""

    def __init__(self, address, address_type=None):
        """
        :param address: the Bitmessage address (or None for "all accounts")
        :param address_type: explicit AccountMixin type constant; when None
            the type is detected from config and the subscriptions table
        """
        self.isEnabled = True
        self.address = address
        if address_type is None:
            # Detection order matters: mailinglist takes precedence over
            # chan, which takes precedence over subscription.
            if address is None:
                self.type = AccountMixin.ALL
            elif BMConfigParser().safeGetBoolean(self.address, 'mailinglist'):
                self.type = AccountMixin.MAILINGLIST
            elif BMConfigParser().safeGetBoolean(self.address, 'chan'):
                self.type = AccountMixin.CHAN
            elif sqlQuery(
                    '''select label from subscriptions where address=?''', self.address):
                self.type = AccountMixin.SUBSCRIPTION
            else:
                self.type = AccountMixin.NORMAL
        else:
            self.type = address_type
class BMAccount(object):
    """Encapsulate a Bitmessage account"""

    def __init__(self, address=None):
        """Detect the account type for ``address`` from config, the
        broadcast pseudo-address, or the subscriptions table."""
        self.address = address
        self.type = AccountMixin.NORMAL
        if BMConfigParser().has_section(address):
            # NOTE: chan is checked before mailinglist here, the reverse of
            # AccountColor's priority — confirm whether that is intentional.
            if BMConfigParser().safeGetBoolean(self.address, 'chan'):
                self.type = AccountMixin.CHAN
            elif BMConfigParser().safeGetBoolean(self.address, 'mailinglist'):
                self.type = AccountMixin.MAILINGLIST
        elif self.address == str_broadcast_subscribers:
            self.type = AccountMixin.BROADCAST
        else:
            queryreturn = sqlQuery(
                '''select label from subscriptions where address=?''', self.address)
            if queryreturn:
                self.type = AccountMixin.SUBSCRIPTION

    def getLabel(self, address=None):
        """Get a label for this bitmessage account

        Lookup order: config 'label' option (defaulting to the address
        itself), then the addressbook table, then subscriptions.
        """
        if address is None:
            address = self.address
        label = BMConfigParser().safeGet(address, 'label', address)
        queryreturn = sqlQuery(
            '''select label from addressbook where address=?''', address)
        if queryreturn != []:
            for row in queryreturn:
                label, = row
        else:
            queryreturn = sqlQuery(
                '''select label from subscriptions where address=?''', address)
            if queryreturn != []:
                for row in queryreturn:
                    label, = row
        return label

    def parseMessage(self, toAddress, fromAddress, subject, message):
        """Set metadata and address labels on self"""
        self.toAddress = toAddress
        self.fromAddress = fromAddress
        # Python 2: coerce unicode subjects to byte strings
        if isinstance(subject, unicode):
            self.subject = str(subject)
        else:
            self.subject = subject
        self.message = message
        self.fromLabel = self.getLabel(fromAddress)
        self.toLabel = self.getLabel(toAddress)
class NoAccount(BMAccount):
    """Placeholder account for addresses with no usable config section"""

    def __init__(self, address=None):  # pylint: disable=super-init-not-called
        # Deliberately skip BMAccount.__init__ — there is no config or
        # subscription entry to inspect for this address.
        self.address = address
        self.type = AccountMixin.NORMAL

    def getLabel(self, address=None):
        """With no stored label available, the address is its own label"""
        return self.address if address is None else address
class SubscriptionAccount(BMAccount):
    """Encapsulate a subscription account"""
    # No specialised behaviour; type detection happens in BMAccount.__init__.
    pass
class BroadcastAccount(BMAccount):
    """Encapsulate a broadcast account"""
    # No specialised behaviour; type detection happens in BMAccount.__init__.
    pass
class GatewayAccount(BMAccount):
    """Encapsulate a gateway account"""
    # Identifier matched against the config "gateway" option by accountClass()
    gatewayName = None
    # Feedback codes returned by parseMessage() in subclasses
    ALL_OK = 0
    REGISTRATION_DENIED = 1

    def __init__(self, address):
        super(GatewayAccount, self).__init__(address)

    def send(self):
        """Override the send method for gateway accounts

        Queues the message in the ``sent`` table and notifies the worker.
        Expects ``toAddress``/``fromAddress``/``subject``/``message`` to have
        been set (e.g. by createMessage/register/unregister) beforehand.
        """
        # pylint: disable=unused-variable
        status, addressVersionNumber, streamNumber, ripe = decodeAddress(self.toAddress)
        stealthLevel = BMConfigParser().safeGetInt('bitmessagesettings', 'ackstealthlevel')
        ackdata = genAckPayload(streamNumber, stealthLevel)
        # Positional INSERT: the value order below must match the sent table's
        # column order exactly.
        sqlExecute(
            '''INSERT INTO sent VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
            '',
            self.toAddress,
            ripe,
            self.fromAddress,
            self.subject,
            self.message,
            ackdata,
            int(time.time()),  # sentTime (this will never change)
            int(time.time()),  # lastActionTime
            0,  # sleepTill time. This will get set when the POW gets done.
            'msgqueued',
            0,  # retryNumber
            'sent',  # folder
            2,  # encodingtype
            # not necessary to have a TTL higher than 2 days
            min(BMConfigParser().getint('bitmessagesettings', 'ttl'), 86400 * 2)
        )
        queues.workerQueue.put(('sendmessage', self.toAddress))
class MailchuckAccount(GatewayAccount):
    """Encapsulate a particular kind of gateway account"""
    # set "gateway" in keys.dat to this
    gatewayName = "mailchuck"
    # Service control addresses: registration/status/config, unregistration,
    # and the relay through which actual email traffic flows.
    registrationAddress = "BM-2cVYYrhaY5Gbi3KqrX9Eae2NRNrkfrhCSA"
    unregistrationAddress = "BM-2cVMAHTRjZHCTPMue75XBK5Tco175DtJ9J"
    relayAddress = "BM-2cWim8aZwUNqxzjMxstnUMtVEUQJeezstf"
    # Incoming relay subject format: "<subj head>MAILCHUCK-FROM::<email> | <subj tail>"
    regExpIncoming = re.compile(r"(.*)MAILCHUCK-FROM::(\S+) \| (.*)")
    # Outgoing relay subject format: "<destination email> <subject>"
    regExpOutgoing = re.compile(r"(\S+) (.*)")

    def __init__(self, address):
        super(MailchuckAccount, self).__init__(address)
        self.feedback = self.ALL_OK

    def createMessage(self, toAddress, fromAddress, subject, message):
        """createMessage specific to a MailchuckAccount

        Routes the message through the relay, encoding the real
        destination email address into the subject line.
        """
        self.subject = toAddress + " " + subject
        self.toAddress = self.relayAddress
        self.fromAddress = fromAddress
        self.message = message

    def register(self, email):
        """register specific to a MailchuckAccount"""
        self.toAddress = self.registrationAddress
        self.subject = email
        self.message = ""
        self.fromAddress = self.address
        self.send()

    def unregister(self):
        """unregister specific to a MailchuckAccount"""
        self.toAddress = self.unregistrationAddress
        self.subject = ""
        self.message = ""
        self.fromAddress = self.address
        self.send()

    def status(self):
        """status specific to a MailchuckAccount"""
        self.toAddress = self.registrationAddress
        self.subject = "status"
        self.message = ""
        self.fromAddress = self.address
        self.send()

    def settings(self):
        """settings specific to a MailchuckAccount

        Prepares (but does not send) a "config" message whose body is a
        commented template of the gateway's available options.
        """
        self.toAddress = self.registrationAddress
        self.subject = "config"
        self.message = QtGui.QApplication.translate(
            "Mailchuck",
            """# You can use this to configure your email gateway account
# Uncomment the setting you want to use
# Here are the options:
#
# pgp: server
# The email gateway will create and maintain PGP keys for you and sign, verify,
# encrypt and decrypt on your behalf. When you want to use PGP but are lazy,
# use this. Requires subscription.
#
# pgp: local
# The email gateway will not conduct PGP operations on your behalf. You can
# either not use PGP at all, or use it locally.
#
# attachments: yes
# Incoming attachments in the email will be uploaded to MEGA.nz, and you can
# download them from there by following the link. Requires a subscription.
#
# attachments: no
# Attachments will be ignored.
#
# archive: yes
# Your incoming emails will be archived on the server. Use this if you need
# help with debugging problems or you need a third party proof of emails. This
# however means that the operator of the service will be able to read your
# emails even after they have been delivered to you.
#
# archive: no
# Incoming emails will be deleted from the server as soon as they are relayed
# to you.
#
# masterpubkey_btc: BIP44 xpub key or electrum v1 public seed
# offset_btc: integer (defaults to 0)
# feeamount: number with up to 8 decimal places
# feecurrency: BTC, XBT, USD, EUR or GBP
# Use these if you want to charge people who send you emails. If this is on and
# an unknown person sends you an email, they will be requested to pay the fee
# specified. As this scheme uses deterministic public keys, you will receive
# the money directly. To turn it off again, set "feeamount" to 0. Requires
# subscription.
""")
        self.fromAddress = self.address

    def parseMessage(self, toAddress, fromAddress, subject, message):
        """parseMessage specific to a MailchuckAccount

        Decodes relay subject lines back into real from/to email
        addresses and labels, and detects registration denial.
        Returns a feedback code (ALL_OK or REGISTRATION_DENIED).
        """
        super(MailchuckAccount, self).parseMessage(toAddress, fromAddress, subject, message)
        if fromAddress == self.relayAddress:
            # Incoming mail: extract the sender's email from the subject
            matches = self.regExpIncoming.search(subject)
            if matches is not None:
                self.subject = ""
                if not matches.group(1) is None:
                    self.subject += matches.group(1)
                if not matches.group(3) is None:
                    self.subject += matches.group(3)
                if not matches.group(2) is None:
                    self.fromLabel = matches.group(2)
                    self.fromAddress = matches.group(2)
        if toAddress == self.relayAddress:
            # Outgoing mail: subject's first token is the destination email
            matches = self.regExpOutgoing.search(subject)
            if matches is not None:
                if not matches.group(2) is None:
                    self.subject = matches.group(2)
                if not matches.group(1) is None:
                    self.toLabel = matches.group(1)
                    self.toAddress = matches.group(1)
        self.feedback = self.ALL_OK
        if fromAddress == self.registrationAddress and self.subject == "Registration Request Denied":
            self.feedback = self.REGISTRATION_DENIED
        return self.feedback
|
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the SoundCloud embed columns to blogs_post."""
        # Adding field 'Post.soundcloud_id'
        # nullable CharField: existing rows keep NULL
        db.add_column('blogs_post', 'soundcloud_id',
                      self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
                      keep_default=False)

        # Adding field 'Post.soundcloud_url'
        # non-null URLField: existing rows are backfilled with ''
        db.add_column('blogs_post', 'soundcloud_url',
                      self.gf('django.db.models.fields.URLField')(default='', max_length=300, blank=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Post.soundcloud_id'
db.delete_column('blogs_post', 'soundcloud_id')
# Deleting field 'Post.soundcloud_url'
db.delete_column('blogs_post', 'soundcloud_url')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'blogs.blog': {
'Meta': {'object_name': 'Blog'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'custom_domain': ('django.db.models.fields.CharField', [], {'max_length': '300', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'draft_notice': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'facebook_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'has_artists': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'header_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_bootblog': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'main_color': ('django.db.models.fields.CharField', [], {'default': "'#ff7f00'", 'max_length': '10'}),
'moderator_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'pinterest_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'short_description': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '30'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Template']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '240'}),
'translation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True', 'blank': 'True'}),
'twitter_link': ('django.db.models.fields.URLField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'twitter_oauth_token_secret': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
'blogs.category': {
'Meta': {'object_name': 'Category'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_category'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'cat_image': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_caret': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_close': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_email': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_fb': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_left': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_pint': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_right': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'cat_image_tw': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'color': ('django.db.models.fields.CharField', [], {'default': "'#000000'", 'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'parent_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_category'", 'null': 'True', 'to': "orm['blogs.Category']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.comment': {
'Meta': {'object_name': 'Comment'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'Comment_author'", 'null': 'True', 'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'comment': ('django.db.models.fields.TextField', [], {'max_length': '10000'}),
'comment_status': ('django.db.models.fields.CharField', [], {'default': "'pe'", 'max_length': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'notify_me': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'post': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Post']", 'null': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'})
},
'blogs.info_email': {
'Meta': {'object_name': 'Info_email'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'We'", 'max_length': '2', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'subject': ('django.db.models.fields.TextField', [], {'max_length': '100', 'blank': 'True'}),
'subscribers': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '2', 'null': 'True'})
},
'blogs.language': {
'Meta': {'object_name': 'Language'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'language_name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menu': {
'Meta': {'object_name': 'Menu'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_menu'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_main': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'blogs.menuitem': {
'Meta': {'object_name': 'MenuItem'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'external_link': ('django.db.models.fields.URLField', [], {'max_length': '140', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Menu']", 'null': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Page']", 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '40', 'blank': 'True'})
},
'blogs.page': {
'Meta': {'object_name': 'Page'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_page'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'})
},
'blogs.post': {
'Meta': {'object_name': 'Post'},
'artist': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'base62id': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'category': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_0': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_01': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_1': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_2': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_3': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_4': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_5': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_6': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'content_video': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_discarded': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ready': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_sticky': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_top': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'karma': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
'layout_type': ('django.db.models.fields.CharField', [], {'default': "'s'", 'max_length': '1'}),
'message': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'pic': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_0': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_04': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_1': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_10': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_11': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_12': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_13': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_14': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_15': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_16': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_17': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_18': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_19': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_2': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_20': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_21': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_22': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_23': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_24': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_25': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_26': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_27': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_28': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_29': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_3': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_30': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_31': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_32': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_33': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_4': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_5': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_6': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_7': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_8': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pic_9': ('sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'publish_on_facebook': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '140', 'blank': 'True'}),
'soundcloud_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'soundcloud_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'source': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '2', 'null': 'True'}),
'tag': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['blogs.Tag']", 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'translated_content': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'translated_title': ('django.db.models.fields.CharField', [], {'max_length': '140', 'blank': 'True'}),
'video': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_ogg': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'video_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'views': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'vimeo_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'})
},
'blogs.rss': {
'Meta': {'object_name': 'Rss'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_rss'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'feed_url': ('django.db.models.fields.URLField', [], {'max_length': '300', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'blogs.subscription': {
'Meta': {'object_name': 'Subscription'},
'blog': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['blogs.Blog']", 'null': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_new': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'blogs.tag': {
'Meta': {'object_name': 'Tag'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Blog_tag'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '140', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '140'})
},
'blogs.template': {
'Meta': {'object_name': 'Template'},
'archives': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'base': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'category': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'single': ('django.db.models.fields.TextField', [], {'max_length': '10000', 'blank': 'True'})
},
'blogs.translation': {
'Meta': {'object_name': 'Translation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'origin_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_origin_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"}),
'translated_blog': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'Translation_translated_blog'", 'null': 'True', 'to': "orm['blogs.Blog']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['blogs']
|
|
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import sqlalchemy.orm.exc as sa_exc
from neutron.common import exceptions as exc
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from networking_vsphere._i18n import _, _LE, _LI, _LW
from networking_vsphere.db import ovsvapp_models as models
from networking_vsphere.extensions import ovsvapp_cluster
from networking_vsphere.extensions import ovsvapp_mitigated_cluster as vapp_mc
LOG = log.getLogger(__name__)

# Tri-state results returned by update_and_get_cluster_lock().
# NOTE(review): kept as strings — callers presumably compare these values
# verbatim; confirm before changing them to ints.
RETRY = "0"
GIVE_UP = "-1"
SUCCESS = "1"
def _generate_vcenter_cluster_allocations(session, vcenter, cluster):
    """Seed the local-vlan allocation rows for one (vCenter, cluster).

    Bulk-inserts one unallocated row per local vlan id in [1, 4095),
    batched so that each INSERT statement stays bounded in size.
    """
    batch_size = 100
    lvid_min = 1
    lvid_max = 4095
    rows = [{'vcenter_id': vcenter,
             'cluster_id': cluster,
             'lvid': lvid}
            for lvid in range(lvid_min, lvid_max)]
    table = models.ClusterVNIAllocations.__table__
    for start in range(0, len(rows), batch_size):
        session.execute(table.insert(), rows[start:start + batch_size])
    LOG.info(_LI("Finished initializing local vlans for cluster %(cluster)s "
                 "of vCenter %(vcenter)s."), {'cluster': cluster,
                                              'vcenter': vcenter})
def _initialize_lvids_for_cluster(port_info):
    """Ensure vlan allocation rows exist for the port's cluster.

    Locks the allocations table, then seeds rows for this
    (vcenter, cluster) pair only when none exist yet.

    :returns: True on success, False when seeding failed.
    """
    vcenter = port_info['vcenter_id']
    cluster = port_info['cluster_id']
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        try:
            # Lock every row so concurrent initializers serialize on the
            # existence check below.
            (session.query(models.ClusterVNIAllocations).
             with_lockmode('update')).all()
            existing = (session.query(models.ClusterVNIAllocations)
                        .filter(
                            models.ClusterVNIAllocations.vcenter_id ==
                            vcenter,
                            models.ClusterVNIAllocations.cluster_id ==
                            cluster)
                        .all())
            if not existing:
                _generate_vcenter_cluster_allocations(
                    session, vcenter, cluster)
            return True
        except Exception:
            LOG.exception(_LE("Exception while initializing VNI "
                              "allocations for clusters %(cluster)s of "
                              "vCenter %(vcenter)s."),
                          {'cluster': cluster,
                           'vcenter': vcenter})
            return False
def _try_to_obtain_local_vlan(session, port_info, assign):
    """Look up (and optionally allocate) the local vlan for a network.

    With ``assign`` True, an existing allocation gets its port count
    incremented; if none exists, a free row is claimed for the network.
    With ``assign`` False this is a pure lookup.

    :returns: the local vlan id, or None when none could be obtained.
    :raises Exception: when ``assign`` is False and no allocation exists
        (bare exception used as control flow; callers catch broadly).
    """
    lvid = None
    res_keys = ['vcenter_id', 'cluster_id', 'network_id']
    res = dict((k, port_info[k]) for k in res_keys)
    try:
        allocation = (session.query(models.ClusterVNIAllocations).filter(
            models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
            models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
            models.ClusterVNIAllocations.network_id == res['network_id']
        ).one())
        lvid = allocation.lvid
        if assign:
            count = allocation.network_port_count + 1
            allocation.update({'network_port_count': count})
            LOG.debug("Incremented the allocated port count for network "
                      "%s.", res)
    except sa_exc.NoResultFound:
        # No allocation yet for this network on this cluster.
        if not assign:
            raise Exception()
        try:
            # Claim the first unallocated lvid row for this cluster.
            allocation = session.query(models.ClusterVNIAllocations).filter(
                models.ClusterVNIAllocations.vcenter_id == res['vcenter_id'],
                models.ClusterVNIAllocations.cluster_id == res['cluster_id'],
                models.ClusterVNIAllocations.allocated == 0
            ).first()
            if allocation:
                lvid = allocation.lvid
                allocation.update({'network_id': res['network_id'],
                                   'allocated': True,
                                   'network_port_count': 1})
                LOG.info(_LI("Assigned local vlan %(lvid)s for the network "
                             "%(network)s on the cluster %(cluster)s."),
                         {'network': port_info['network_id'],
                          'cluster': port_info['cluster_id'],
                          'lvid': lvid})
            else:
                # Every lvid row is already assigned to some network.
                LOG.error(_LE("All available VLANs are used up in the cluster "
                              "%(cluster)s of vCenter %(vcenter)s."),
                          {'cluster': res['cluster_id'],
                           'vcenter': res['vcenter_id']})
        except Exception as e:
            LOG.exception(_LE("Unable to obtain local vlan id %s."), e)
    return lvid
def get_local_vlan(port_info, assign=True):
    """Return (and by default allocate) the local vlan for a port's network.

    Locks the cluster's allocation rows before delegating to
    _try_to_obtain_local_vlan.  When the rows are not provisioned yet,
    seeds them via _initialize_lvids_for_cluster and retries once.

    :param port_info: mapping with 'vcenter_id', 'cluster_id',
        'network_id' (and 'port_id', used only for logging).
    :param assign: when False, perform a lookup without incrementing
        the port count.
    :returns: the local vlan id, or None on failure.
    """
    lvid = None
    session = db_api.get_session()
    res_keys = ['vcenter_id', 'cluster_id', 'network_id']
    res = dict((k, port_info[k]) for k in res_keys)
    with session.begin(subtransactions=True):
        try:
            if not assign:
                # Pure lookup: no row locking needed.
                lvid = _try_to_obtain_local_vlan(session, port_info, assign)
                return lvid
            query = session.query(models.ClusterVNIAllocations)
            # Lock all the rows in the table corresponding to the vCenter
            # and cluster.
            cluster_rows = query.filter(
                (models.ClusterVNIAllocations.vcenter_id ==
                 res['vcenter_id']),
                (models.ClusterVNIAllocations.cluster_id ==
                 res['cluster_id'])
            ).with_lockmode('update').all()
            if cluster_rows:
                lvid = _try_to_obtain_local_vlan(session, port_info, assign)
                return lvid
            else:
                # Fall through to provisioning below, outside this
                # transaction.
                LOG.info(_LI("Local VLAN rows not provisioned for the "
                             "cluster %(cluster)s of vCenter %(vcenter)s. "
                             "Going to provision."),
                         {'cluster': res['cluster_id'],
                          'vcenter': res['vcenter_id']})
        except Exception:
            LOG.exception(_LE("Error retrieving a local vlan for network "
                              "%(network)s for %(port)s."),
                          {'network': port_info['network_id'],
                           'port': port_info['port_id']})
            return
    # Rows were missing: seed them, then retry the allocation once in a
    # fresh transaction.
    status = _initialize_lvids_for_cluster(res)
    if status:
        with session.begin(subtransactions=True):
            lvid = _try_to_obtain_local_vlan(session, port_info, assign)
    else:
        LOG.error(_LE("Local VLAN rows not provisioned for the "
                      "cluster %(cluster)s of vCenter %(vcenter)s."),
                  {'cluster': res['cluster_id'],
                   'vcenter': res['vcenter_id']})
    return lvid
def check_to_reclaim_local_vlan(port_info):
    """Decrement the port count on a network's local vlan allocation.

    :returns: the lvid when the network's port count has reached zero
        (so the caller may release it), otherwise -1.
    """
    lvid = -1
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        keys = ('vcenter_id', 'cluster_id', 'network_id')
        res = {k: port_info[k] for k in keys}
        try:
            allocation = (
                session.query(models.ClusterVNIAllocations)
                .filter(
                    models.ClusterVNIAllocations.vcenter_id ==
                    res['vcenter_id'],
                    models.ClusterVNIAllocations.cluster_id ==
                    res['cluster_id'],
                    models.ClusterVNIAllocations.network_id ==
                    res['network_id'])
                .with_lockmode('update').one())
            remaining = allocation.network_port_count
            if remaining >= 1:
                remaining -= 1
                allocation.update({'network_port_count': remaining})
                LOG.debug("Decremented the allocated port count for network "
                          "%s.", res)
            if remaining == 0:
                lvid = allocation.lvid
                LOG.info(_LI("lvid can be released for network: %s."), res)
        except sa_exc.NoResultFound:
            # Nothing to do; another controller may already have cleared
            # the record.  Log and fall through with lvid == -1.
            LOG.debug("Network %(network)s is already de-allocated for "
                      "cluster %(cluster)s.",
                      {'network': port_info['network_id'],
                       'cluster': port_info['cluster_id']})
    return lvid
def release_local_vlan(net_info):
    """Free a network's local vlan once no ports remain on it.

    Resets the allocation row (network_id cleared, allocated False)
    only when the port count is already zero; otherwise just logs.
    """
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        keys = ('vcenter_id', 'cluster_id', 'network_id')
        res = {k: net_info[k] for k in keys}
        try:
            allocation = (
                session.query(models.ClusterVNIAllocations)
                .filter(
                    models.ClusterVNIAllocations.vcenter_id ==
                    res['vcenter_id'],
                    models.ClusterVNIAllocations.cluster_id ==
                    res['cluster_id'],
                    models.ClusterVNIAllocations.network_id ==
                    res['network_id'])
                .with_lockmode('update').one())
            if allocation.network_port_count == 0:
                allocation.update({'network_id': None,
                                   'allocated': False,
                                   'network_port_count': 0})
                LOG.info(_LI("Released lvid for network: %s."), res)
            else:
                LOG.info(_LI("Unable to release local vlan for network_id %s "
                             "because ports are available on network."),
                         res['network_id'])
        except sa_exc.NoResultFound:
            # Another controller may already have cleared the record;
            # nothing further to do.
            LOG.error(_LE("Network %(network)s is already de-allocated for "
                          "cluster %(cluster)s."),
                      {'network': net_info['network_id'],
                       'cluster': net_info['cluster_id']})
def get_stale_local_vlans_for_network(network_id):
    """Find allocation rows still holding the given network.

    :returns: a list of (vcenter_id, cluster_id, lvid) tuples, or None
        when no stale allocations exist.
    """
    session = db_api.get_session()
    vcenter_clusters = None
    with session.begin(subtransactions=True):
        try:
            rows = (session.query(models.ClusterVNIAllocations)
                    .filter(models.ClusterVNIAllocations.network_id ==
                            network_id)
                    .all())
            if rows:
                vcenter_clusters = [(row.vcenter_id,
                                     row.cluster_id,
                                     row.lvid) for row in rows]
                LOG.info(_LI("Found stale allocations for network "
                             "%s."), network_id)
        except Exception:
            # Port deletions normally clean these rows up already; just
            # log and return the (empty) result.
            LOG.debug("Network %s is already cleaned up from "
                      "VNI allocations table.", network_id)
    return vcenter_clusters
def update_and_get_cluster_lock(vcenter_id, cluster_id):
    """Try to take the mitigation lock for a cluster.

    Returns SUCCESS when this caller obtained the lock (row created or
    'being_mitigated' flipped on), RETRY when another caller already holds
    it, and GIVE_UP when 'threshold_reached' is set for the cluster.
    """
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        try:
            query = session.query(models.OVSvAppClusters)
            # Row lock serializes concurrent agents on the flags below.
            cluster_row = (query.filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_lockmode('update').one())
            if not cluster_row.threshold_reached:
                if not cluster_row.being_mitigated:
                    # Lock is free: claim it.
                    cluster_row.update({'being_mitigated': True})
                    LOG.info(_LI("Blocked the cluster %s for maintenance."),
                             cluster_id)
                    return SUCCESS
                else:
                    LOG.info(_LI("Cluster %s is under maintenance. "
                                 "Will retry later"), cluster_id)
                    return RETRY
            else:
                LOG.warning(_LW("Cluster %(id)s in vCenter %(vc)s needs "
                                "attention. "
                                "Not able to put hosts to maintenance!"),
                            {'id': cluster_id,
                             'vc': vcenter_id})
                return GIVE_UP
        except sa_exc.NoResultFound:
            # First fault case in this cluster_id.
            cluster_row = {'vcenter_id': vcenter_id,
                           'cluster_id': cluster_id,
                           'being_mitigated': True}
            session.execute(models.OVSvAppClusters.__table__.insert(),
                            cluster_row)
            LOG.info(_LI("Blocked the cluster %s for maintenance."),
                     cluster_id)
            return SUCCESS
def release_cluster_lock(vcenter_id, cluster_id):
    """Clear both mitigation flags on a cluster row, releasing its lock."""
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        try:
            row = session.query(models.OVSvAppClusters).filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_lockmode('update').one()
            row.update({'being_mitigated': False,
                        'threshold_reached': False})
        except sa_exc.NoResultFound:
            LOG.error(_LE("Cannot update the row for cluster %s."), cluster_id)
def reset_cluster_threshold(vcenter_id, cluster_id):
    """Clear the threshold flags for a cluster, inserting the row if absent."""
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        query = session.query(models.OVSvAppClusters)
        try:
            row = query.filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_lockmode('update').one()
        except sa_exc.NoResultFound:
            # First agent in this cluster: seed a fresh row with defaults.
            LOG.error(_LE("Cluster row not found for %s."), cluster_id)
            session.execute(models.OVSvAppClusters.__table__.insert(),
                            {'vcenter_id': vcenter_id,
                             'cluster_id': cluster_id})
        else:
            if row.threshold_reached:
                row.update({'being_mitigated': False,
                            'threshold_reached': False})
def set_cluster_threshold(vcenter_id, cluster_id):
    """Mark a cluster as having reached its mitigation threshold."""
    session = db_api.get_session()
    with session.begin(subtransactions=True):
        query = session.query(models.OVSvAppClusters)
        try:
            row = query.filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).with_lockmode('update').one()
        except sa_exc.NoResultFound:
            LOG.error(_LE("Cluster row not found for %s."), cluster_id)
        else:
            LOG.info(_LI("Cluster row found for %s."), row)
            if not row.threshold_reached:
                row.update({'being_mitigated': False,
                            'threshold_reached': True})
def _admin_check(context, action):
"""Admin role check helper."""
if not context.is_admin:
reason = _('Cannot %s resource for non admin tenant') % action
raise exc.AdminRequired(reason=reason)
class OVSvAppClusterDbMixin(ovsvapp_cluster.OVSvAppClusterPluginBase):
    """DB mixin implementing the ovsvapp-cluster admin extension."""

    def get_ovsvapp_cluster(self, context, vcenter_id, fields=None):
        """Return {'vcenter_id': ..., 'clusters': [...]} for one vCenter.

        :raises exc.InvalidInput: when no allocation rows exist for it.
        """
        _admin_check(context, 'GET')
        LOG.info(_LI("Retrieving vCenter cluster information for vcenter_id:"
                     " %s."), vcenter_id)
        db_table = models.ClusterVNIAllocations
        query = context.session.query(db_table)
        filter_query = query.filter(db_table.vcenter_id == vcenter_id)
        grouped_query = filter_query.group_by('cluster_id')
        query_objs = grouped_query.all()
        if len(query_objs) == 0:
            _msg = ("No details found for vCenter:"
                    "%(vcenter_id)s") % {'vcenter_id': vcenter_id}
            raise exc.InvalidInput(error_message=_msg)
        vcenter_dict = dict()
        vcenter_dict['vcenter_id'] = vcenter_id
        vcenter_dict['clusters'] = [obj.cluster_id for obj in query_objs]
        return vcenter_dict

    def get_ovsvapp_clusters(self, context, filters=None, fields=None):
        """Return cluster listings, optionally filtered by 'vcenter_id'.

        Only the 'vcenter_id' filter is supported; any other filter is
        rejected with InvalidInput.
        """
        _admin_check(context, 'GET')
        LOG.info(_LI("Retrieving vCenter cluster information."))
        if filters:
            if 'vcenter_id' in filters:
                vcenter_id = filters['vcenter_id'][0]
                # BUG FIX: previously called self.get_vcenter_cluster(),
                # which is not defined anywhere in this mixin; the intended
                # method is get_ovsvapp_cluster() above.
                return [self.get_ovsvapp_cluster(context, vcenter_id)]
            _msg = "Invalid filter specified"
            raise exc.InvalidInput(error_message=_msg)
        query = context.session.query(models.ClusterVNIAllocations).group_by(
            'vcenter_id')
        query_objs = query.all()
        vcenter_set = set([vcenter.vcenter_id for vcenter in query_objs])
        vcenter_list = list()
        for vcenter in vcenter_set:
            filter_query = query.filter(models.ClusterVNIAllocations.
                                        vcenter_id == vcenter)
            grouped_objs = filter_query.group_by('cluster_id').all()
            vcenter_dict = dict()
            vcenter_dict['vcenter_id'] = vcenter
            vcenter_dict['clusters'] = [obj.cluster_id for obj in grouped_objs]
            vcenter_list.append(vcenter_dict)
        return vcenter_list

    def create_ovsvapp_cluster(self, context, ovsvapp_cluster):
        """Seed lvid allocation rows for each cluster of a vCenter.

        :raises exc.InvalidInput: when row initialization fails.
        """
        _admin_check(context, 'CREATE')
        vcenter = ovsvapp_cluster['ovsvapp_cluster']
        vcenter_clusters = vcenter['clusters']
        LOG.info(_LI("Creating a vCenter cluster entry with vcenter id %s."),
                 vcenter['vcenter_id'])
        for cluster_name in vcenter_clusters:
            vcenter_info = dict()
            vcenter_info['vcenter_id'] = vcenter['vcenter_id']
            vcenter_info['cluster_id'] = cluster_name
            if not _initialize_lvids_for_cluster(vcenter_info):
                raise exc.InvalidInput(error_message='Cannot create DB entry.')
        return vcenter

    def update_ovsvapp_cluster(self, context, id, ovsvapp_cluster):
        """Remove the allocation rows of the given clusters of vCenter *id*."""
        _admin_check(context, 'UPDATE')
        vcenter_id = id
        clusters_list = ovsvapp_cluster['ovsvapp_cluster']['clusters']
        LOG.info(_LI("Deleting the vCenter clusters %(cluster_id)s with"
                     "vCenter id %(vcenter_id)s."),
                 {'cluster_id': clusters_list,
                  'vcenter_id': id})
        with context.session.begin(subtransactions=True):
            query = context.session.query(models.ClusterVNIAllocations)
            for cluster_id in clusters_list:
                # Do a bulk delete operation with each cluster.
                query.filter(
                    models.ClusterVNIAllocations.vcenter_id == vcenter_id,
                    models.ClusterVNIAllocations.cluster_id == cluster_id
                ).delete()
        return ovsvapp_cluster['ovsvapp_cluster']
class OVSvAppMitigatedClusterDbMixin(vapp_mc.OVSvAppMitigatedClusterPluginBase,
                                     common_db_mixin.CommonDbMixin):
    """DB mixin implementing the ovsvapp-mitigated-cluster admin extension."""

    def get_ovsvapp_mitigated_cluster(self, context, vcenter_id, fields=None):
        """Return the mitigation flags for one cluster.

        :param vcenter_id: composite id '<vcenter>:<cluster>'; '|' in the
                           cluster part stands for '/'.
        :raises exc.InvalidInput: when no row exists for the pair.
        """
        _admin_check(context, 'GET')
        mitigated_info = vcenter_id.split(':')
        vcenter_id = mitigated_info[0]
        cluster_id = mitigated_info[1].replace('|', '/')
        LOG.info(_LI("Retrieving mitigated information for vcenter_id"
                     " %s."), vcenter_id)
        mitigated_cluster = dict()
        try:
            query = context.session.query(models.OVSvAppClusters)
            cluster_row = (query.filter(
                models.OVSvAppClusters.vcenter_id == vcenter_id,
                models.OVSvAppClusters.cluster_id == cluster_id
            ).one())
        except sa_exc.NoResultFound:
            _msg = ("No entry found for specified vCenter %(vcenter_id)s "
                    "cluster %(cluster_id)s") % {'vcenter_id': vcenter_id,
                                                 'cluster_id': cluster_id}
            raise exc.InvalidInput(error_message=_msg)
        mitigated_cluster['vcenter_id'] = cluster_row.vcenter_id
        mitigated_cluster['cluster_id'] = cluster_row.cluster_id
        mitigated_cluster['being_mitigated'] = cluster_row.being_mitigated
        mitigated_cluster['threshold_reached'] = cluster_row.threshold_reached
        return mitigated_cluster

    def update_ovsvapp_mitigated_cluster(self, context, id,
                                         ovsvapp_mitigated_cluster):
        """Update being_mitigated/threshold_reached flags for a cluster.

        :raises exc.InvalidInput: when the row does not exist.
        """
        _admin_check(context, 'UPDATE')
        res_dict = ovsvapp_mitigated_cluster['ovsvapp_mitigated_cluster']
        vcenter_id = res_dict['vcenter_id']
        cluster_id = res_dict['cluster_id']
        update_flags = dict()
        if 'being_mitigated' in res_dict:
            update_flags['being_mitigated'] = res_dict['being_mitigated']
        if 'threshold_reached' in res_dict:
            update_flags['threshold_reached'] = res_dict['threshold_reached']
        # BUG FIX: this routine status message was logged at ERROR level;
        # log at INFO to match the other CRUD methods of this mixin.
        LOG.info(_LI("Updating the mitigation properties with "
                     "vCenter id %s."),
                 vcenter_id)
        with context.session.begin(subtransactions=True):
            try:
                query = context.session.query(models.OVSvAppClusters)
                cluster_row = (query.filter(
                    models.OVSvAppClusters.vcenter_id == vcenter_id,
                    models.OVSvAppClusters.cluster_id == cluster_id
                ).with_lockmode('update').one())
                cluster_row.update(update_flags)
            except sa_exc.NoResultFound:
                _msg = ("No entry found for specified vCenter %(vcenter_id)s"
                        " cluster %(cluster_id)s") % {'vcenter_id': vcenter_id,
                                                      'cluster_id': cluster_id}
                raise exc.InvalidInput(error_message=_msg)
        return res_dict

    def get_ovsvapp_mitigated_clusters(self, context, filters=None,
                                       fields=None):
        """List mitigation flags for all clusters matching *filters*."""
        _admin_check(context, 'GET')
        db_filters = dict()
        if filters:
            for filter_entry in filters:
                db_filters[filter_entry] = filters[filter_entry]
        LOG.info(_LI("Retrieving mitigated information of all clusters."))
        mitigated_clusters = list()
        try:
            all_entries = self._get_collection_query(context,
                                                     models.OVSvAppClusters,
                                                     filters=db_filters).all()
        except sa_exc.NoResultFound:
            # Typo fix: 'retreive' -> 'retrieve' in the error message.
            raise exc.InvalidInput(error_message='Cannot retrieve mitigated '
                                   'information.')
        for entry in all_entries:
            mitigated_cluster = dict()
            mitigated_cluster['vcenter_id'] = entry.vcenter_id
            mitigated_cluster['cluster_id'] = entry.cluster_id
            mitigated_cluster['being_mitigated'] = entry.being_mitigated
            mitigated_cluster['threshold_reached'] = entry.threshold_reached
            mitigated_clusters.append(mitigated_cluster)
        return mitigated_clusters

    def delete_ovsvapp_mitigated_cluster(self, context, id, filters=None):
        """Delete the mitigation row identified by '<vcenter>:<cluster>'.

        :raises exc.InvalidInput: when the id is malformed.
        """
        _admin_check(context, 'DELETE')
        mitigated_info = id.split(':')
        if len(mitigated_info) != 2:
            raise exc.InvalidInput(error_message='Invalid format..')
        vcenter_id = mitigated_info[0]
        cluster_id = mitigated_info[1].replace('|', '/')
        LOG.info(_LI("Deleting mitigation entry with vCenter_id %s."),
                 vcenter_id)
        with context.session.begin(subtransactions=True):
            try:
                query = context.session.query(models.OVSvAppClusters)
                query = query.filter(
                    models.OVSvAppClusters.vcenter_id == vcenter_id,
                    models.OVSvAppClusters.cluster_id == cluster_id
                ).delete()
            except sa_exc.NoResultFound:
                # NOTE(review): a bulk .delete() does not raise
                # NoResultFound; kept defensively for backend quirks.
                _msg = ("No entry found for specified vCenter %(vcenter_id)s"
                        " cluster %(cluster_id)s") % {'vcenter_id': vcenter_id,
                                                      'cluster_id': cluster_id}
                raise exc.InvalidInput(error_message=_msg)
|
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2014 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import gzip
from io import BytesIO
import json
import logging
import os
import posixpath
import re
try:
import threading
except ImportError:
import dummy_threading as threading
import zlib
from . import DistlibException
from .compat import (urljoin, urlparse, urlunparse, url2pathname, pathname2url,
queue, quote, unescape, string_types, build_opener,
HTTPRedirectHandler as BaseRedirectHandler,
Request, HTTPError, URLError)
from .database import Distribution, DistributionPath, make_dist
from .metadata import Metadata
from .util import (cached_property, parse_credentials, ensure_slash,
split_filename, get_project_data, parse_requirement,
parse_name_and_version, ServerProxy)
from .version import get_scheme, UnsupportedVersionError
from .wheel import Wheel, is_compatible
logger = logging.getLogger(__name__)

# Raw strings for the regex patterns: the originals were plain strings
# containing '\w', which is an invalid escape sequence (DeprecationWarning
# and eventually SyntaxWarning/SyntaxError on modern Python).
HASHER_HASH = re.compile(r'^(\w+)=([a-f0-9]+)')
CHARSET = re.compile(r';\s*charset\s*=\s*(.*)\s*$', re.I)
HTML_CONTENT_TYPE = re.compile(r'text/html|application/x(ht)?ml')
DEFAULT_INDEX = 'http://python.org/pypi'
def get_all_distribution_names(url=None):
    """Return a list of all distribution names known by an index.

    :param url: The URL of the index; ``DEFAULT_INDEX`` when None.
    :return: A list of all known distribution names.
    """
    if url is None:
        url = DEFAULT_INDEX
    return ServerProxy(url, timeout=3.0).list_packages()
class RedirectHandler(BaseRedirectHandler):
    """
    A class to work around a bug in some Python 3.2.x releases.
    """
    # There's a bug in the base version for some 3.2.x
    # (e.g. 3.2.2 on Ubuntu Oneiric). If a Location header
    # returns e.g. /abc, it bails because it says the scheme ''
    # is bogus, when actually it should use the request's
    # URL for the scheme. See Python issue #13696.
    def http_error_302(self, req, fp, code, msg, headers):
        # Some servers (incorrectly) return multiple Location headers
        # (so probably same goes for URI). Use first header.
        for key in ('location', 'uri'):
            if key in headers:
                newurl = headers[key]
                break
        else:
            return
        parts = urlparse(newurl)
        if not parts.scheme:
            # Scheme-less (relative) redirect: resolve against the
            # request URL and rewrite the header in place.
            newurl = urljoin(req.get_full_url(), newurl)
            if hasattr(headers, 'replace_header'):
                headers.replace_header(key, newurl)
            else:
                headers[key] = newurl
        return BaseRedirectHandler.http_error_302(self, req, fp, code, msg,
                                                  headers)

    http_error_301 = http_error_303 = http_error_307 = http_error_302
class Locator(object):
    """
    A base class for locators - things that locate distributions.
    """
    # Archive extensions treated as source distributions.
    source_extensions = ('.tar.gz', '.tar.bz2', '.tar', '.zip', '.tgz', '.tbz')
    # Archive extensions treated as binary distributions.
    binary_extensions = ('.egg', '.exe', '.whl')
    # Extensions never considered as distribution downloads.
    excluded_extensions = ('.pdf',)
    # A list of tags indicating which wheels you want to match. The default
    # value of None matches against the tags compatible with the running
    # Python. If you want to match other values, set wheel_tags on a locator
    # instance to a list of tuples (pyver, abi, arch) which you want to match.
    wheel_tags = None
    downloadable_extensions = source_extensions + ('.whl',)
    def __init__(self, scheme='default'):
        """
        Initialise an instance.
        :param scheme: Because locators look for most recent versions, they
                       need to know the version scheme to use. This specifies
                       the current PEP-recommended scheme - use ``'legacy'``
                       if you need to support existing distributions on PyPI.
        """
        # Maps project name -> result dict; None disables caching
        # (see get_project()).
        self._cache = {}
        self.scheme = scheme
        # Because of bugs in some of the handlers on some of the platforms,
        # we use our own opener rather than just using urlopen.
        self.opener = build_opener(RedirectHandler())
        # If get_project() is called from locate(), the matcher instance
        # is set from the requirement passed to locate(). See issue #18 for
        # why this can be useful to know.
        self.matcher = None
    def clear_cache(self):
        """Discard all cached per-project results."""
        self._cache.clear()
    def _get_scheme(self):
        return self._scheme
    def _set_scheme(self, value):
        self._scheme = value
    # Name of the version scheme in use (e.g. 'default' or 'legacy').
    scheme = property(_get_scheme, _set_scheme)
    def _get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.
        This should be implemented in subclasses.
        If called from a locate() request, self.matcher will be set to a
        matcher for the requirement to satisfy, otherwise it will be None.
        """
        raise NotImplementedError('Please implement in the subclass')
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Please implement in the subclass')
    def get_project(self, name):
        """
        For a given project, get a dictionary mapping available versions to Distribution
        instances.
        This calls _get_project to do all the work, and just implements a caching layer on top.
        """
        if self._cache is None:
            result = self._get_project(name)
        elif name in self._cache:
            result = self._cache[name]
        else:
            result = self._get_project(name)
            self._cache[name] = result
        return result
    def score_url(self, url):
        """
        Give an url a score which can be used to choose preferred URLs
        for a given project release.
        """
        t = urlparse(url)
        # Higher tuples win in prefer_url(): non-https scheme first, then
        # hosted on pypi.python.org, then the archive basename.
        return (t.scheme != 'https', 'pypi.python.org' in t.netloc,
                posixpath.basename(t.path))
    def prefer_url(self, url1, url2):
        """
        Choose one of two URLs where both are candidates for distribution
        archives for the same version of a distribution (for example,
        .tar.gz vs. zip).
        The current implement favours http:// URLs over https://, archives
        from PyPI over those from other locations and then the archive name.
        """
        result = url2
        if url1:
            s1 = self.score_url(url1)
            s2 = self.score_url(url2)
            if s1 > s2:
                result = url1
            if result != url2:
                logger.debug('Not replacing %r with %r', url1, url2)
            else:
                logger.debug('Replacing %r with %r', url1, url2)
        return result
    def split_filename(self, filename, project_name):
        """
        Attempt to split a filename in project name, version and Python version.
        """
        return split_filename(filename, project_name)
    def convert_url_to_download_info(self, url, project_name):
        """
        See if a URL is a candidate for a download URL for a project (the URL
        has typically been scraped from an HTML page).
        If it is, a dictionary is returned with keys "name", "version",
        "filename" and "url"; otherwise, None is returned.
        """
        def same_project(name1, name2):
            # Case-insensitive comparison treating '-' and '_' as equal.
            name1, name2 = name1.lower(), name2.lower()
            if name1 == name2:
                result = True
            else:
                # distribute replaces '-' by '_' in project names, so it
                # can tell where the version starts in a filename.
                result = name1.replace('_', '-') == name2.replace('_', '-')
            return result
        result = None
        scheme, netloc, path, params, query, frag = urlparse(url)
        if frag.lower().startswith('egg='):
            logger.debug('%s: version hint in fragment: %r',
                         project_name, frag)
        # A fragment like 'md5=abcd...' carries a digest for the download.
        m = HASHER_HASH.match(frag)
        if m:
            algo, digest = m.groups()
        else:
            algo, digest = None, None
        origpath = path
        if path and path[-1] == '/':
            path = path[:-1]
        if path.endswith('.whl'):
            try:
                wheel = Wheel(path)
                if is_compatible(wheel, self.wheel_tags):
                    if project_name is None:
                        include = True
                    else:
                        include = same_project(wheel.name, project_name)
                    if include:
                        result = {
                            'name': wheel.name,
                            'version': wheel.version,
                            'filename': wheel.filename,
                            'url': urlunparse((scheme, netloc, origpath,
                                               params, query, '')),
                            'python-version': ', '.join(
                                ['.'.join(list(v[2:])) for v in wheel.pyver]),
                        }
            except Exception as e:
                logger.warning('invalid path for wheel: %s', path)
        elif path.endswith(self.downloadable_extensions):
            path = filename = posixpath.basename(path)
            for ext in self.downloadable_extensions:
                if path.endswith(ext):
                    path = path[:-len(ext)]
                    t = self.split_filename(path, project_name)
                    if not t:
                        logger.debug('No match for project/version: %s', path)
                    else:
                        name, version, pyver = t
                        if not project_name or same_project(project_name, name):
                            result = {
                                'name': name,
                                'version': version,
                                'filename': filename,
                                'url': urlunparse((scheme, netloc, origpath,
                                                   params, query, '')),
                                #'packagetype': 'sdist',
                            }
                            if pyver:
                                result['python-version'] = pyver
                    break
        if result and algo:
            result['%s_digest' % algo] = digest
        return result
    def _get_digest(self, info):
        """
        Get a digest from a dictionary by looking at keys of the form
        'algo_digest'.
        Returns a 2-tuple (algo, digest) if found, else None. Currently
        looks only for SHA256, then MD5.
        """
        result = None
        for algo in ('sha256', 'md5'):
            key = '%s_digest' % algo
            if key in info:
                result = (algo, info[key])
                break
        return result
    def _update_version_data(self, result, info):
        """
        Update a result dictionary (the final result from _get_project) with a
        dictionary for a specific version, which typically holds information
        gleaned from a filename or URL for an archive for the distribution.
        """
        name = info.pop('name')
        version = info.pop('version')
        if version in result:
            dist = result[version]
            md = dist.metadata
        else:
            dist = make_dist(name, version, scheme=self.scheme)
            md = dist.metadata
        dist.digest = digest = self._get_digest(info)
        url = info['url']
        result['digests'][url] = digest
        if md.source_url != info['url']:
            md.source_url = self.prefer_url(md.source_url, url)
        result['urls'].setdefault(version, set()).add(url)
        dist.locator = self
        result[version] = dist
    def locate(self, requirement, prereleases=False):
        """
        Find the most recent distribution which matches the given
        requirement.
        :param requirement: A requirement of the form 'foo (1.0)' or perhaps
                            'foo (>= 1.0, < 2.0, != 1.3)'
        :param prereleases: If ``True``, allow pre-release versions
                            to be located. Otherwise, pre-release versions
                            are not returned.
        :return: A :class:`Distribution` instance, or ``None`` if no such
                 distribution could be located.
        """
        result = None
        r = parse_requirement(requirement)
        if r is None:
            raise DistlibException('Not a valid requirement: %r' % requirement)
        scheme = get_scheme(self.scheme)
        self.matcher = matcher = scheme.matcher(r.requirement)
        logger.debug('matcher: %s (%s)', matcher, type(matcher).__name__)
        versions = self.get_project(r.name)
        if versions:
            # sometimes, versions are invalid
            slist = []
            vcls = matcher.version_class
            # NOTE(review): the bookkeeping keys 'urls' and 'digests' from
            # _get_project are also iterated here; presumably matcher.match()
            # fails on them and the except below swallows it — confirm.
            for k in versions:
                try:
                    if not matcher.match(k):
                        logger.debug('%s did not match %r', matcher, k)
                    else:
                        if prereleases or not vcls(k).is_prerelease:
                            slist.append(k)
                        else:
                            logger.debug('skipping pre-release '
                                         'version %s of %s', k, matcher.name)
                except Exception:
                    logger.warning('error matching %s with %r', matcher, k)
                    pass # slist.append(k)
            if len(slist) > 1:
                slist = sorted(slist, key=scheme.key)
            if slist:
                logger.debug('sorted list: %s', slist)
                version = slist[-1]
                result = versions[version]
        if result:
            if r.extras:
                result.extras = r.extras
            result.download_urls = versions.get('urls', {}).get(version, set())
            d = {}
            sd = versions.get('digests', {})
            for url in result.download_urls:
                if url in sd:
                    d[url] = sd[url]
            result.digests = d
        # Reset so later direct get_project() calls see no matcher.
        self.matcher = None
        return result
class PyPIRPCLocator(Locator):
    """
    This locator uses XML-RPC to locate distributions. It therefore
    cannot be used with simple mirrors (that only mirror file content).
    """
    def __init__(self, url, **kwargs):
        """
        Initialise an instance.
        :param url: The URL to use for XML-RPC.
        :param kwargs: Passed to the superclass constructor.
        """
        super(PyPIRPCLocator, self).__init__(**kwargs)
        self.base_url = url
        self.client = ServerProxy(url, timeout=3.0)

    def get_distribution_names(self):
        """Return the set of all distribution names known to this locator."""
        return set(self.client.list_packages())

    def _get_project(self, name):
        """Build the version -> Distribution mapping via XML-RPC calls."""
        result = {'urls': {}, 'digests': {}}
        for version in self.client.package_releases(name, True):
            release_urls = self.client.release_urls(name, version)
            release_data = self.client.release_data(name, version)
            metadata = Metadata(scheme=self.scheme)
            metadata.name = release_data['name']
            metadata.version = release_data['version']
            metadata.license = release_data.get('license')
            metadata.keywords = release_data.get('keywords', [])
            metadata.summary = release_data.get('summary')
            dist = Distribution(metadata)
            if not release_urls:
                # No download URLs: skip recording this release entirely.
                continue
            first = release_urls[0]
            metadata.source_url = first['url']
            dist.digest = self._get_digest(first)
            dist.locator = self
            result[version] = dist
            for entry in release_urls:
                entry_url = entry['url']
                result['urls'].setdefault(version, set()).add(entry_url)
                result['digests'][entry_url] = self._get_digest(entry)
        return result
class PyPIJSONLocator(Locator):
    """
    This locator uses PyPI's JSON interface. It's very limited in functionality
    and probably not worth using.
    """
    def __init__(self, url, **kwargs):
        super(PyPIJSONLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        """Fetch ``<name>/json`` from the index and build the result mapping.

        Returns a dict mapping the latest version to its Distribution,
        plus 'urls' and 'digests' bookkeeping entries. Network or parse
        failures are logged and yield the (possibly empty) partial result.
        """
        result = {'urls': {}, 'digests': {}}
        url = urljoin(self.base_url, '%s/json' % quote(name))
        try:
            resp = self.opener.open(url)
            data = resp.read().decode() # for now
            d = json.loads(data)
            md = Metadata(scheme=self.scheme)
            data = d['info']
            md.name = data['name']
            md.version = data['version']
            md.license = data.get('license')
            md.keywords = data.get('keywords', [])
            md.summary = data.get('summary')
            dist = Distribution(md)
            urls = d['urls']
            if urls:
                info = urls[0]
                md.source_url = info['url']
                dist.digest = self._get_digest(info)
                dist.locator = self
                result[md.version] = dist
                for info in urls:
                    url = info['url']
                    result['urls'].setdefault(md.version, set()).add(url)
                    # BUG FIX: 'digest' was an undefined name here, raising
                    # NameError for every release with download URLs;
                    # compute the digest for each URL explicitly.
                    result['digests'][url] = self._get_digest(info)
        except Exception as e:
            logger.exception('JSON fetch failed: %s', e)
        return result
class Page(object):
    """
    This class represents a scraped HTML page.
    """
    # The following slightly hairy-looking regex just looks for the contents of
    # an anchor link, which has an attribute "href" either immediately preceded
    # or immediately followed by a "rel" attribute. The attribute values can be
    # declared with double quotes, single quotes or no quotes - which leads to
    # the length of the expression.
    _href = re.compile("""
(rel\s*=\s*(?:"(?P<rel1>[^"]*)"|'(?P<rel2>[^']*)'|(?P<rel3>[^>\s\n]*))\s+)?
href\s*=\s*(?:"(?P<url1>[^"]*)"|'(?P<url2>[^']*)'|(?P<url3>[^>\s\n]*))
(\s+rel\s*=\s*(?:"(?P<rel4>[^"]*)"|'(?P<rel5>[^']*)'|(?P<rel6>[^>\s\n]*)))?
""", re.I | re.S | re.X)
    # Matches a <base href="..."> tag, used to resolve relative links.
    _base = re.compile(r"""<base\s+href\s*=\s*['"]?([^'">]+)""", re.I | re.S)
    def __init__(self, data, url):
        """
        Initialise an instance with the Unicode page contents and the URL they
        came from.
        """
        self.data = data
        self.base_url = self.url = url
        m = self._base.search(self.data)
        if m:
            self.base_url = m.group(1)
    # Characters not allowed to appear literally in emitted URLs; they get
    # percent-encoded in links below.
    _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I)
    @cached_property
    def links(self):
        """
        Return the URLs of all the links on a page together with information
        about their "rel" attribute, for determining which ones to treat as
        downloads and which ones to queue for further scraping.
        """
        def clean(url):
            "Tidy up an URL."
            scheme, netloc, path, params, query, frag = urlparse(url)
            return urlunparse((scheme, netloc, quote(path),
                               params, query, frag))
        result = set()
        for match in self._href.finditer(self.data):
            d = match.groupdict('')
            rel = (d['rel1'] or d['rel2'] or d['rel3'] or
                   d['rel4'] or d['rel5'] or d['rel6'])
            url = d['url1'] or d['url2'] or d['url3']
            url = urljoin(self.base_url, url)
            url = unescape(url)
            # Percent-encode any disallowed character ('%%%2x' -> '%' + hex).
            url = self._clean_re.sub(lambda m: '%%%2x' % ord(m.group(0)), url)
            result.add((url, rel))
        # We sort the result, hoping to bring the most recent versions
        # to the front
        result = sorted(result, key=lambda t: t[0], reverse=True)
        return result
class SimpleScrapingLocator(Locator):
    """
    A locator which scrapes HTML pages to locate downloads for a distribution.
    This runs multiple threads to do the I/O; performance is at least as good
    as pip's PackageFinder, which works in an analogous fashion.
    """

    # These are used to deal with various Content-Encoding schemes.
    decoders = {
        'deflate': zlib.decompress,
        # BUG FIX: this lambda previously referenced the undefined name 'd'
        # instead of its parameter 'b', so any gzip-encoded response raised
        # NameError.
        'gzip': lambda b: gzip.GzipFile(fileobj=BytesIO(b)).read(),
        'none': lambda b: b,
    }

    def __init__(self, url, timeout=None, num_workers=10, **kwargs):
        """
        Initialise an instance.
        :param url: The root URL to use for scraping.
        :param timeout: The timeout, in seconds, to be applied to requests.
                        This defaults to ``None`` (no timeout specified).
        :param num_workers: The number of worker threads you want to do I/O,
                            This defaults to 10.
        :param kwargs: Passed to the superclass.
        """
        super(SimpleScrapingLocator, self).__init__(**kwargs)
        self.base_url = ensure_slash(url)
        self.timeout = timeout
        self._page_cache = {}
        self._seen = set()
        self._to_fetch = queue.Queue()
        self._bad_hosts = set()
        self.skip_externals = False
        self.num_workers = num_workers
        self._lock = threading.RLock()
        # See issue #45: we need to be resilient when the locator is used
        # in a thread, e.g. with concurrent.futures. We can't use self._lock
        # as it is for coordinating our internal threads - the ones created
        # in _prepare_threads.
        self._gplock = threading.RLock()

    def _prepare_threads(self):
        """
        Threads are created only when get_project is called, and terminate
        before it returns. They are there primarily to parallelise I/O (i.e.
        fetching web pages).
        """
        self._threads = []
        for i in range(self.num_workers):
            t = threading.Thread(target=self._fetch)
            t.setDaemon(True)
            t.start()
            self._threads.append(t)

    def _wait_threads(self):
        """
        Tell all the threads to terminate (by sending a sentinel value) and
        wait for them to do so.
        """
        # Note that you need two loops, since you can't say which
        # thread will get each sentinel
        for t in self._threads:
            self._to_fetch.put(None)    # sentinel
        for t in self._threads:
            t.join()
        self._threads = []

    def _get_project(self, name):
        result = {'urls': {}, 'digests': {}}
        with self._gplock:
            # self.result / self.project_name are read by the worker
            # threads started below, hence the lock over the whole scrape.
            self.result = result
            self.project_name = name
            url = urljoin(self.base_url, '%s/' % quote(name))
            self._seen.clear()
            self._page_cache.clear()
            self._prepare_threads()
            try:
                logger.debug('Queueing %s', url)
                self._to_fetch.put(url)
                self._to_fetch.join()
            finally:
                self._wait_threads()
            del self.result
        return result

    platform_dependent = re.compile(r'\b(linux-(i\d86|x86_64|arm\w+)|'
                                    r'win(32|-amd64)|macosx-?\d+)\b', re.I)

    def _is_platform_dependent(self, url):
        """
        Does an URL refer to a platform-specific download?
        """
        return self.platform_dependent.search(url)

    def _process_download(self, url):
        """
        See if an URL is a suitable download for a project.
        If it is, register information in the result dictionary (for
        _get_project) about the specific version it's for.
        Note that the return value isn't actually used other than as a boolean
        value.
        """
        if self._is_platform_dependent(url):
            info = None
        else:
            info = self.convert_url_to_download_info(url, self.project_name)
        logger.debug('process_download: %s -> %s', url, info)
        if info:
            with self._lock:    # needed because self.result is shared
                self._update_version_data(self.result, info)
        return info

    def _should_queue(self, link, referrer, rel):
        """
        Determine whether a link URL from a referring page and with a
        particular "rel" attribute should be queued for scraping.
        """
        scheme, netloc, path, _, _, _ = urlparse(link)
        if path.endswith(self.source_extensions + self.binary_extensions +
                         self.excluded_extensions):
            result = False
        elif self.skip_externals and not link.startswith(self.base_url):
            result = False
        elif not referrer.startswith(self.base_url):
            result = False
        elif rel not in ('homepage', 'download'):
            result = False
        elif scheme not in ('http', 'https', 'ftp'):
            result = False
        elif self._is_platform_dependent(link):
            result = False
        else:
            host = netloc.split(':', 1)[0]
            if host.lower() == 'localhost':
                result = False
            else:
                result = True
        logger.debug('should_queue: %s (%s) from %s -> %s', link, rel,
                     referrer, result)
        return result

    def _fetch(self):
        """
        Get a URL to fetch from the work queue, get the HTML page, examine its
        links for download candidates and candidates for further scraping.
        This is a handy method to run in a thread.
        """
        while True:
            url = self._to_fetch.get()
            try:
                if url:
                    page = self.get_page(url)
                    if page is None:    # e.g. after an error
                        continue
                    for link, rel in page.links:
                        if link not in self._seen:
                            self._seen.add(link)
                            if (not self._process_download(link) and
                                self._should_queue(link, url, rel)):
                                logger.debug('Queueing %s from %s', link, url)
                                self._to_fetch.put(link)
            finally:
                # always do this, to avoid hangs :-)
                self._to_fetch.task_done()
            if not url:
                #logger.debug('Sentinel seen, quitting.')
                break

    def get_page(self, url):
        """
        Get the HTML for an URL, possibly from an in-memory cache.
        XXX TODO Note: this cache is never actually cleared. It's assumed that
        the data won't get stale over the lifetime of a locator instance (not
        necessarily true for the default_locator).
        """
        # http://peak.telecommunity.com/DevCenter/EasyInstall#package-index-api
        scheme, netloc, path, _, _, _ = urlparse(url)
        if scheme == 'file' and os.path.isdir(url2pathname(path)):
            url = urljoin(ensure_slash(url), 'index.html')
        if url in self._page_cache:
            result = self._page_cache[url]
            logger.debug('Returning %s from cache: %s', url, result)
        else:
            host = netloc.split(':', 1)[0]
            result = None
            if host in self._bad_hosts:
                logger.debug('Skipping %s due to bad host %s', url, host)
            else:
                req = Request(url, headers={'Accept-encoding': 'identity'})
                try:
                    logger.debug('Fetching %s', url)
                    resp = self.opener.open(req, timeout=self.timeout)
                    logger.debug('Fetched %s', url)
                    headers = resp.info()
                    content_type = headers.get('Content-Type', '')
                    if HTML_CONTENT_TYPE.match(content_type):
                        final_url = resp.geturl()
                        data = resp.read()
                        encoding = headers.get('Content-Encoding')
                        if encoding:
                            decoder = self.decoders[encoding]   # fail if not found
                            data = decoder(data)
                        encoding = 'utf-8'
                        m = CHARSET.search(content_type)
                        if m:
                            encoding = m.group(1)
                        try:
                            data = data.decode(encoding)
                        except UnicodeError:
                            data = data.decode('latin-1')    # fallback
                        result = Page(data, final_url)
                        self._page_cache[final_url] = result
                except HTTPError as e:
                    if e.code != 404:
                        logger.exception('Fetch failed: %s: %s', url, e)
                except URLError as e:
                    logger.exception('Fetch failed: %s: %s', url, e)
                    with self._lock:
                        self._bad_hosts.add(host)
                except Exception as e:
                    logger.exception('Fetch failed: %s: %s', url, e)
                finally:
                    self._page_cache[url] = result   # even if None (failure)
        return result

    _distname_re = re.compile('<a href=[^>]*>([^<]+)<')

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        page = self.get_page(self.base_url)
        if not page:
            raise DistlibException('Unable to get %s' % self.base_url)
        for match in self._distname_re.finditer(page.data):
            result.add(match.group(1))
        return result
class DirectoryLocator(Locator):
    """
    This class locates distributions in a directory tree.
    """
    def __init__(self, path, **kwargs):
        """
        Initialise an instance.

        :param path: The root of the directory tree to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * recursive - if True (the default), subdirectories are
                         recursed into. If False, only the top-level directory
                         is searched,
        """
        self.recursive = kwargs.pop('recursive', True)
        super(DirectoryLocator, self).__init__(**kwargs)
        root = os.path.abspath(path)
        if not os.path.isdir(root):
            raise DistlibException('Not a directory: %r' % root)
        self.base_dir = root

    def should_include(self, filename, parent):
        """
        Should a filename be considered as a candidate for a distribution
        archive? As well as the filename, the directory which contains it
        is provided, though not used by the current implementation.
        """
        return filename.endswith(self.downloadable_extensions)

    def _candidate_urls(self):
        # Yield a file:// URL for every candidate archive under base_dir,
        # honouring the ``recursive`` flag (non-recursive mode stops after
        # the top-level directory).
        for dirpath, _subdirs, filenames in os.walk(self.base_dir):
            for filename in filenames:
                if self.should_include(filename, dirpath):
                    full_path = os.path.join(dirpath, filename)
                    yield urlunparse(('file', '',
                                      pathname2url(os.path.abspath(full_path)),
                                      '', '', ''))
            if not self.recursive:
                break

    def _get_project(self, name):
        # Collect version data for ``name`` from all candidate archives.
        result = {'urls': {}, 'digests': {}}
        for url in self._candidate_urls():
            info = self.convert_url_to_download_info(url, name)
            if info:
                self._update_version_data(result, info)
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        names = set()
        for url in self._candidate_urls():
            info = self.convert_url_to_download_info(url, None)
            if info:
                names.add(info['name'])
        return names
class JSONLocator(Locator):
    """
    This locator uses special extended metadata (not available on PyPI) and is
    the basis of performant dependency resolution in distlib. Other locators
    require archive downloads before dependencies can be determined! As you
    might imagine, that can be slow.
    """
    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        raise NotImplementedError('Not available from this locator')

    def _get_project(self, name):
        # Build version -> Distribution data from the extended JSON metadata.
        result = {'urls': {}, 'digests': {}}
        data = get_project_data(name)
        for info in (data.get('files', []) if data else []):
            # Only source distributions are considered.
            if info['ptype'] != 'sdist' or info['pyversion'] != 'source':
                continue
            # We don't store summary in project metadata as it makes
            # the data bigger for no benefit during dependency
            # resolution
            dist = make_dist(data['name'], info['version'],
                             summary=data.get('summary',
                                              'Placeholder for summary'),
                             scheme=self.scheme)
            md = dist.metadata
            md.source_url = info['url']
            # TODO SHA256 digest
            if info.get('digest'):
                dist.digest = ('md5', info['digest'])
            md.dependencies = info.get('requirements', {})
            dist.exports = info.get('exports', {})
            result[dist.version] = dist
            result['urls'].setdefault(dist.version, set()).add(info['url'])
        return result
class DistPathLocator(Locator):
    """
    This locator finds installed distributions in a path. It can be useful for
    adding to an :class:`AggregatingLocator`.
    """
    def __init__(self, distpath, **kwargs):
        """
        Initialise an instance.

        :param distpath: A :class:`DistributionPath` instance to search.
        """
        super(DistPathLocator, self).__init__(**kwargs)
        assert isinstance(distpath, DistributionPath)
        self.distpath = distpath

    def _get_project(self, name):
        """
        Return version data for an installed distribution named ``name``.

        The result always contains the 'urls' and 'digests' keys, matching
        the shape returned by the other locators' ``_get_project``
        implementations (e.g. :class:`DirectoryLocator`,
        :class:`JSONLocator`), so that merging code such as
        :meth:`AggregatingLocator._get_project` can treat all locators
        uniformly. The original returned a bare ``{}`` / keyless dict here.
        """
        dist = self.distpath.get_distribution(name)
        if dist is None:
            # No matching installed distribution: empty result in the
            # canonical shape.
            result = {'urls': {}, 'digests': {}}
        else:
            result = {
                dist.version: dist,
                'urls': {dist.version: set([dist.source_url])},
                # No digest information is available for an installed
                # distribution, so record a placeholder entry.
                'digests': {dist.version: set([None])}
            }
        return result
class AggregatingLocator(Locator):
    """
    This class allows you to chain and/or merge a list of locators.
    """
    def __init__(self, *locators, **kwargs):
        """
        Initialise an instance.

        :param locators: The list of locators to search.
        :param kwargs: Passed to the superclass constructor,
                       except for:
                       * merge - if False (the default), the first successful
                         search from any of the locators is returned. If True,
                         the results from all locators are merged (this can be
                         slow).
        """
        self.merge = kwargs.pop('merge', False)
        self.locators = locators
        super(AggregatingLocator, self).__init__(**kwargs)

    def clear_cache(self):
        # Clear this locator's cache and each child locator's cache too.
        super(AggregatingLocator, self).clear_cache()
        for locator in self.locators:
            locator.clear_cache()

    def _set_scheme(self, value):
        # Setting the scheme propagates to every child locator.
        self._scheme = value
        for locator in self.locators:
            locator.scheme = value

    # Reuse the inherited getter, but override the setter so assignment
    # reaches the children as well.
    scheme = property(Locator.scheme.fget, _set_scheme)

    def _get_project(self, name):
        result = {}
        for locator in self.locators:
            d = locator.get_project(name)
            if d:
                if self.merge:
                    # Preserve the accumulated 'urls'/'digests' mappings,
                    # because dict.update below replaces them wholesale.
                    files = result.get('urls', {})
                    digests = result.get('digests', {})
                    # next line could overwrite result['urls'], result['digests']
                    result.update(d)
                    df = result.get('urls')
                    if files and df:
                        # Merge the previously-seen URL sets back in,
                        # version by version.
                        for k, v in files.items():
                            if k in df:
                                df[k] |= v
                            else:
                                df[k] = v
                    dd = result.get('digests')
                    if digests and dd:
                        dd.update(digests)
                else:
                    # See issue #18. If any dists are found and we're looking
                    # for specific constraints, we only return something if
                    # a match is found. For example, if a DirectoryLocator
                    # returns just foo (1.0) while we're looking for
                    # foo (>= 2.0), we'll pretend there was nothing there so
                    # that subsequent locators can be queried. Otherwise we
                    # would just return foo (1.0) which would then lead to a
                    # failure to find foo (>= 2.0), because other locators
                    # weren't searched. Note that this only matters when
                    # merge=False.
                    if self.matcher is None:
                        found = True
                    else:
                        found = False
                        for k in d:
                            if self.matcher.match(k):
                                found = True
                                break
                    if found:
                        # First satisfactory result wins in non-merge mode.
                        result = d
                        break
        return result

    def get_distribution_names(self):
        """
        Return all the distribution names known to this locator.
        """
        result = set()
        for locator in self.locators:
            try:
                result |= locator.get_distribution_names()
            except NotImplementedError:
                # Some locators (e.g. JSONLocator) cannot enumerate names;
                # just skip them.
                pass
        return result
# We use a legacy scheme simply because most of the dists on PyPI use legacy
# versions which don't conform to PEP 426 / PEP 440.
# Note: the index URL was updated from the retired 'pypi.python.org/simple/'
# host to the current 'pypi.org/simple/' (PEP 503 simple index).
default_locator = AggregatingLocator(
                    JSONLocator(),
                    SimpleScrapingLocator('https://pypi.org/simple/',
                                          timeout=3.0),
                    scheme='legacy')

# Convenience alias: locate(requirement) searches the default locator.
locate = default_locator.locate

# Splits a requirement such as 'name (== 1.0)' or 'name (1.0)' into its
# 'name' and 'ver' groups.
NAME_VERSION_RE = re.compile(r'(?P<name>[\w-]+)\s*'
                             r'\(\s*(==\s*)?(?P<ver>[^)]+)\)$')
class DependencyFinder(object):
    """
    Locate dependencies for distributions.
    """
    def __init__(self, locator=None):
        """
        Initialise an instance, using the specified locator
        to locate distributions.
        """
        self.locator = locator or default_locator
        self.scheme = get_scheme(self.locator.scheme)

    def add_distribution(self, dist):
        """
        Add a distribution to the finder. This will update internal information
        about who provides what.
        :param dist: The distribution to add.
        """
        logger.debug('adding distribution %s', dist)
        name = dist.key
        self.dists_by_name[name] = dist
        self.dists[(name, dist.version)] = dist
        # Index every 'provides' entry so find_providers can look it up.
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Add to provided: %s, %s, %s', name, version, dist)
            self.provided.setdefault(name, set()).add((version, dist))

    def remove_distribution(self, dist):
        """
        Remove a distribution from the finder. This will update internal
        information about who provides what.
        :param dist: The distribution to remove.
        """
        logger.debug('removing distribution %s', dist)
        name = dist.key
        del self.dists_by_name[name]
        del self.dists[(name, dist.version)]
        for p in dist.provides:
            name, version = parse_name_and_version(p)
            logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
            s = self.provided[name]
            s.remove((version, dist))
            # Drop the key entirely once nothing provides this name.
            if not s:
                del self.provided[name]

    def get_matcher(self, reqt):
        """
        Get a version matcher for a requirement.
        :param reqt: The requirement
        :type reqt: str
        :return: A version matcher (an instance of
                 :class:`distlib.version.Matcher`).
        """
        try:
            matcher = self.scheme.matcher(reqt)
        except UnsupportedVersionError:
            # XXX compat-mode if cannot read the version
            name = reqt.split()[0]
            matcher = self.scheme.matcher(name)
        return matcher

    def find_providers(self, reqt):
        """
        Find the distributions which can fulfill a requirement.

        :param reqt: The requirement.
        :type reqt: str
        :return: A set of distribution which can fulfill the requirement.
        """
        matcher = self.get_matcher(reqt)
        name = matcher.key   # case-insensitive
        result = set()
        provided = self.provided
        if name in provided:
            for version, provider in provided[name]:
                try:
                    match = matcher.match(version)
                except UnsupportedVersionError:
                    match = False
                if match:
                    # First matching provider is enough; stop searching.
                    result.add(provider)
                    break
        return result

    def try_to_replace(self, provider, other, problems):
        """
        Attempt to replace one provider with another. This is typically used
        when resolving dependencies from multiple sources, e.g. A requires
        (B >= 1.0) while C requires (B >= 1.1).

        For successful replacement, ``provider`` must meet all the requirements
        which ``other`` fulfills.

        :param provider: The provider we are trying to replace with.
        :param other: The provider we're trying to replace.
        :param problems: If False is returned, this will contain what
                         problems prevented replacement. This is currently
                         a tuple of the literal string 'cantreplace',
                         ``provider``, ``other``  and the set of requirements
                         that ``provider`` couldn't fulfill.
        :return: True if we can replace ``other`` with ``provider``, else
                 False.
        """
        rlist = self.reqts[other]
        unmatched = set()
        # Check that provider satisfies every requirement recorded for other.
        for s in rlist:
            matcher = self.get_matcher(s)
            if not matcher.match(provider.version):
                unmatched.add(s)
        if unmatched:
            # can't replace other with provider
            problems.add(('cantreplace', provider, other,
                          frozenset(unmatched)))
            result = False
        else:
            # can replace other with provider
            self.remove_distribution(other)
            del self.reqts[other]
            # Transfer other's requirement records to provider.
            for s in rlist:
                self.reqts.setdefault(provider, set()).add(s)
            self.add_distribution(provider)
            result = True
        return result

    def find(self, requirement, meta_extras=None, prereleases=False):
        """
        Find a distribution and all distributions it depends on.

        :param requirement: The requirement specifying the distribution to
                            find, or a Distribution instance.
        :param meta_extras: A list of meta extras such as :test:, :build: and
                            so on.
        :param prereleases: If ``True``, allow pre-release versions to be
                            returned - otherwise, don't return prereleases
                            unless they're all that's available.

        Return a set of :class:`Distribution` instances and a set of
        problems.

        The distributions returned should be such that they have the
        :attr:`required` attribute set to ``True`` if they were
        from the ``requirement`` passed to ``find()``, and they have the
        :attr:`build_time_dependency` attribute set to ``True`` unless they
        are post-installation dependencies of the ``requirement``.

        The problems should be a tuple consisting of the string
        ``'unsatisfied'`` and the requirement which couldn't be satisfied
        by any distribution known to the locator.
        """
        # Reset per-call state.
        self.provided = {}
        self.dists = {}
        self.dists_by_name = {}
        self.reqts = {}

        meta_extras = set(meta_extras or [])
        if ':*:' in meta_extras:
            # ':*:' expands to all meta extras.
            meta_extras.remove(':*:')
            # :meta: and :run: are implicitly included
            meta_extras |= set([':test:', ':build:', ':dev:'])

        if isinstance(requirement, Distribution):
            dist = odist = requirement
            logger.debug('passed %s as requirement', odist)
        else:
            dist = odist = self.locator.locate(requirement,
                                               prereleases=prereleases)
            if dist is None:
                raise DistlibException('Unable to locate %r' % requirement)
            logger.debug('located %s', odist)
        dist.requested = True
        problems = set()
        # Worklist of distributions whose dependencies still need processing.
        todo = set([dist])
        # Distributions needed at install/run time (vs build time only).
        install_dists = set([odist])
        while todo:
            dist = todo.pop()
            name = dist.key     # case-insensitive
            if name not in self.dists_by_name:
                self.add_distribution(dist)
            else:
                #import pdb; pdb.set_trace()
                other = self.dists_by_name[name]
                if other != dist:
                    self.try_to_replace(dist, other, problems)

            # Install-time vs build-time requirement sets.
            ireqts = dist.run_requires | dist.meta_requires
            sreqts = dist.build_requires
            ereqts = set()
            if dist in install_dists:
                # Requested meta extras (test/build/dev) only apply to dists
                # that will actually be installed.
                for key in ('test', 'build', 'dev'):
                    e = ':%s:' % key
                    if e in meta_extras:
                        ereqts |= getattr(dist, '%s_requires' % key)
            all_reqts = ireqts | sreqts | ereqts
            for r in all_reqts:
                providers = self.find_providers(r)
                if not providers:
                    logger.debug('No providers found for %r', r)
                    provider = self.locator.locate(r, prereleases=prereleases)
                    # If no provider is found and we didn't consider
                    # prereleases, consider them now.
                    if provider is None and not prereleases:
                        provider = self.locator.locate(r, prereleases=True)
                    if provider is None:
                        logger.debug('Cannot satisfy %r', r)
                        problems.add(('unsatisfied', r))
                    else:
                        n, v = provider.key, provider.version
                        if (n, v) not in self.dists:
                            todo.add(provider)
                        providers.add(provider)
                        if r in ireqts and dist in install_dists:
                            # An install-time requirement of an installed dist
                            # must itself be installed.
                            install_dists.add(provider)
                            logger.debug('Adding %s to install_dists',
                                         provider.name_and_version)
                for p in providers:
                    name = p.key
                    if name not in self.dists_by_name:
                        # Record which requirement pulled p in, for later
                        # try_to_replace checks.
                        self.reqts.setdefault(p, set()).add(r)
                    else:
                        other = self.dists_by_name[name]
                        if other != p:
                            # see if other can be replaced by p
                            self.try_to_replace(p, other, problems)

        dists = set(self.dists.values())
        for dist in dists:
            dist.build_time_dependency = dist not in install_dists
            if dist.build_time_dependency:
                logger.debug('%s is a build-time dependency only.',
                             dist.name_and_version)
        logger.debug('find done for %s', odist)
        return dists, problems
|
|
import pandas as pd
import glob
import os
import numpy as np
import gc
import logging
import mne
logger = logging.getLogger('__SignalManager__')

# Module description. Note this is a bare string expression, not a module
# docstring (it does not appear first in the file), and has no runtime effect.
"""Utility that acts as a wrapper around the Python Pandas package, which is useful for handling time series data efficiently.
In particular, the management tool helps to sanity check, align and utilise experimentally relevant epochs in multi channel data"""

# Fixed: the original literal 'Andrew O\Harney' contained the invalid escape
# sequence '\H' (a SyntaxWarning on modern CPython, producing a literal
# backslash); the intended apostrophe is restored here.
__author__ = "Andrew O'Harney"
class SignalManager:
    """This class manages signals in edf,fif,or hd5 format (note all files are converted to hd5)
    It makes extensive use of pandas to represent time series and events logs
    Requirements - pandas,numpy,matplotlib
                 - mne (utility functions)
                 - matplotlib (utility functions/plotting)"""

    ############Member Variables################
    __base_file_name = None #Path and name of the signal data, with no file extension
    __signals = None #Pandas Data frame columns=chans, index = times
    __log_file = None #Events log file
    __wd = None #Working data
    __wc = None #Working Channels
    __eventskeys = None #Codes for events in log
    # NOTE(review): the methods below read/write ``self.__eventskey`` (no
    # trailing 's'), so the ``__eventskeys`` default above appears unused —
    # confirm before relying on it.
    __currentMeanCalcChans = None #Channels used to calculate current mean
    __currentMeanApplyChans = None #Channels means were applied to
    # NOTE(review): ``__currentMeanApplyChans`` is never assigned by any
    # method in this class, so it stays None; remove_channel's comprehension
    # over it would raise (and be swallowed by its except clause) — verify.

    ###############Private Methods##########################
    def __init__(self,base_file_name=None,log_file=None,offsets=None,new_log_out=False,eventsKey=None):
        """Initialiser takes the path of the signal data
        #Can also set the event matrix or generate it from a log path

        Keyword arguments:
        base_file_name=None -- The path and name of the signal data with no file extension
        log_file=None -- Path to log file of events
        offsets=None -- If a log file has been provided, startimes is a path to a file containing appropriate offets for each block of events (if required - often useful for alignment)
        new_log_out=None -- If offsets is specified then new_log_out is a Boolean value if the corrected file is to be output
        eventsKey=None -- If special event codes are required (i.e not from the csv file)"""
        self.__base_file_name = base_file_name
        logger.debug('Using file : %s'%(base_file_name))
        # Open (converting if necessary) the backing HDF5 store.
        self.__load_data__()
        if eventsKey is None:
            eventsKey = {'_':0,'blockStart':1}
        self.set_eventsKey(eventsKey)
        #Check for log file to create event matrix
        if log_file:
            self.set_log_file(log_file,offsets,new_log_out)
        else:
            logger.info( 'No log specified -- assuming event matrix is in the data')

    def __load_data__(self):
        """Attempts to load data from .hd5
        If .hd5 file does not exist, it will try to convert to it"""
        # Preference order: existing .hd5 > .fif (convert) > .edf (convert
        # twice: edf -> fif -> hd5).
        if self.base_file_name() is None:
            raise Exception('Data was not specified')
        elif self.__check_for_files__('hd5'): #HD5 is the basis for pytables
            logger.info( 'Found .hd5 -- opening')
        elif self.__check_for_files__('fif'):
            logger.info( 'Could not find .hd5 -- converting .fif->.hd5')
            self.__fif_2_hdf5__()
        elif self.__check_for_files__('edf'):
            logger.info( 'Could not find .hd5 -- converting .edf->.fif->.hd5')
            self.__edf_2_fif__()
            self.__fif_2_hdf5__()
        else:
            logger.info( "Could not find any appropriate files. Valid files are *.[edf, fif, hd5]. Assuming data will be supplied later")
        # Always attempt to open the store, even when no source file was found.
        self.__open_hdf5__()
        #except Exception as e: raise e

    def __check_for_files__(self, ftype):
        """Returns a list of files in the local Directory containing the file type

        Keyword Arguments:
        ftype -- Type of file to check for"""
        return glob.glob(self.__base_file_name + '.' + ftype)

    def __open_hdf5__(self):
        """Tries to open hd5 file"""
        try:
            self.__signals = pd.HDFStore(self.__base_file_name+'.hd5')
        except:
            # NOTE(review): bare except — any failure (including KeyboardInterrupt)
            # is converted to this generic Exception.
            logger.warning('Could not open hd5 file')
            raise Exception('Could not open hd5 file')

    def __edf_2_fif__(self):
        """"Tries to convert edf to fif"""
        # Shells out to the mne command-line tool, which must be on PATH.
        sysString = 'mne_edf2fiff --edf '+self.base_file_name()+'.edf --fif ' + self.base_file_name()+'.fif'
        logger.info('System Command : %s'%(sysString))
        try:
            os.system(sysString)
            logger.info( 'Conversion edf->fif complete')
        except:
            logger.warning('Could not find mne on system path')
            raise Exception('Could not find mne on system path -- cannot convert from .edf')

    def removeNonImportant(self,importantEventCodes):
        #NEED TO THINK ABOUT THIS ONE
        pass
        # NOTE(review): the ``pass`` above is a no-op — it does NOT stop
        # execution, so the code below still runs. If this method was meant
        # to be disabled, the remaining lines should be removed or returned
        # before. Confirm intent.
        #Will remove parts of the signal not important to the experiment
        em = self.event_matrix()
        em = em[em['event.code'].isin(importantEventCodes)]
        exptimes = self.eventsTimes(em)
        # NOTE(review): uses key 'Data' (capitalised) while the rest of the
        # class uses 'data'; also ``.ix`` was removed in pandas 1.0 — confirm
        # the pinned pandas version.
        self.__signals['Data'] = self.__signals['Data'].ix[exptimes]
        self.__signals.flush()

    def __fif_2_hdf5__(self):
        """Tries to convert .fif file to .hd5 format"""
        #Get data from the raw .fif file
        try:
            # NOTE(review): ``mne.fiff.Raw`` is the legacy mne API — confirm
            # the installed mne version still provides it.
            raw = mne.fiff.Raw(self.__base_file_name+'.fif')
        except:
            logger.warning('Could not open fif file')
            raise Exception("Could not open fif file")
        logger.debug( 'Extracting data from .fif')
        # Channel 0 is skipped here and in ch_names below.
        data,time_stamps = raw[1:,:raw.last_samp]
        ch_names = raw.ch_names[1:]
        logger.debug('Found channels : %s'%(str(ch_names)))
        fs = raw.info['sfreq']
        logger.debug('Found frequency : %f'%(fs))
        raw.close()
        self.save_hdf(data,time_stamps,ch_names,fs,self.base_file_name())
        self.__open_hdf5__()

    def __create_events_matrix__(self):
        """Creates a Dataframe with index=data timestamps times, columns=signal channels"""
        logger.info( "Generating event matrix")
        # Log files may be tab- or comma-separated; the regex delimiter
        # accepts both.
        events = pd.read_csv(self.__log_file,delimiter=r'\t|,')
        logger.debug( 'Found columns:'+str(events.columns))
        self.__signals['event_matrix'] = events
        self.__flushSignals__()
        logger.info( "Saving event matrix")
        self.__find_blocks__()

    def __find_blocks__(self):
        """Finds the on and off times of blocks
        Note: Blocks are defined as starting at event types blockStart(event id 1)"""
        logger.info( "Finding blocks")
        logger.debug( '\tCalculating block indices')
        em = self.event_matrix()
        blockStartIndices = em[em['event.code'] == self.__eventskey['blockStart']].index #Start of each block
        logger.debug(blockStartIndices)
        blockEndIndices = blockStartIndices
        blockEndIndices = blockEndIndices[1:].values - 2 #Remove the first pulse and shift to become last pulse in each preceding block
        blockEndIndices = np.append(blockEndIndices, len(em) - 1) #Add final pulse in file
        #Define the times of each block
        logger.debug( '\tCalculating start and end times of each block')
        startTimes = em.ix[blockStartIndices]['pulse.on'].values
        endTimes = em.ix[blockEndIndices]['pulse.off'].values
        logger.debug('Start times '+str(startTimes))
        logger.debug('End times '+str(endTimes))
        # Assemble a 2-column frame: one row per block.
        blocks = pd.DataFrame([startTimes,endTimes])
        blocks = blocks.T
        blocks.columns=['pulse.on','pulse.off']
        logger.info( "Saving blocks")
        self.__signals['blocks'] = blocks
        self.__flushSignals__()

    def __flushSignals__(self):
        """"Forces a write to the hdf file"""
        self.__signals.flush()

    ###############Public Methods#################################
    def set_eventsKey(self,eventsKey):
        """Set a dictionary containing event code descriptions

        Keyword Arguments:
        eventsKey -- Dictionary containing event codes and label names"""
        self.__eventskey = eventsKey

    def eventsKey(self):
        """Get events key"""
        return self.__eventskey

    @staticmethod
    def save_hdf(data,times,cnames,fs,base_file_name):
        """Takes raw data and saves to HD5

        Keyword arguments:
        #data - raw signal data
        #cnames - channel names
        #base_file_name - base file name
        #fs - sample rate of data"""
        # NOTE(review): x and y are never used after this unpack.
        (x,y)= data.shape
        #Store in hd5(pytables) format
        logger.info( "Converting to pytables")
        #signals = pd.HDFStore(base_file_name+'.hd5','w',complevel=9)
        signals = pd.HDFStore(base_file_name+'.hd5','w')
        #
        logger.debug( '\tSaving timing info')
        signals['times'] = pd.Series(times,dtype='float64')
        #
        logger.debug( '\tSaving data')
        # Data arrives as (channels, samples); transpose so rows are samples.
        signals['data']=pd.DataFrame(data.T,columns=cnames,index=times) #Ideally this would be tables=True
        #
        logger.debug( "\tSaving meta data")
        signals['channels'] = pd.Series(cnames)
        signals['fs'] = pd.Series(fs)
        #signals['data_dimensions'] = pd.Series(['channels', 'samples'])
        signals.close()
        logger.info( 'Conversion complete')

    def add_channel(self,sig,name):
        """Adds a channel to the hd5 file

        Keyword Arguments:
        sig -- the raw signal to be added
        name -- name of the new channel"""
        if name not in self.channels():
            newData = self.data()
            newData[name] = pd.Series(sig,name=[name],index=self.data().index)
            self.__signals['data'] = newData
            # Record the new channel name in the persistent channel list.
            self.__signals['channels'] = self.channels().append(pd.Series(name,index=[len(self.channels())]))
            self.__signals.flush()
        else:
            logger.info( 'Channel with that name already exists')

    def remove_channel(self,chan):
        """Removes channel chan from the persistent .hd5 file

        Keyword Arguments:
        chan -- Channel to be removed"""
        #Try and remove the specified channel
        try:
            self.__signals['data'] = self.data().drop(chan,axis=1)
            #Remove from channel record
            self.__signals['channels'] = self.channels()[self.channels()!=chan]
            self.__flushSignals__()
            #If the channel was in the current working set
            currentChan = self.wc()
            if chan in currentChan:
                currentChan.remove(chan)
                if self.__currentMeanCalcChans is not None and chan in self.__currentMeanCalcChans:
                    # Rebuild the working data with the removed channel also
                    # excluded from the mean calc/apply sets.
                    self.set_wd(currentChan,meanCalcChans=[mc for mc in self.__currentMeanCalcChans if mc != chan],meanApplyChans=[mc for mc in self.__currentMeanApplyChans if mc != chan])
                else:
                    self.set_wd(currentChan)
        except:
            # NOTE(review): this bare except also swallows unrelated failures
            # (e.g. HDF errors), not just a missing channel.
            logger.info( 'No channel called '+chan)

    #################Public Methods#########################
    def blocks(self):
        """Return the on and off times of blocks"""
        return self.__signals['blocks']

    def base_file_name(self):
        """Return the base file path"""
        return self.__base_file_name

    def log_file_name(self):
        """"Returns the log path of the psychopy file in use (i.e the file the event_matrix was generated from)"""
        return self.__log_file

    def event_matrix(self,types=None):
        """Returns the event matrix

        Keyword Arguments:
        types -- Event types (codes to return) - Still to be implemented
        """
        return self.__signals['event_matrix']

    def calc_mean(self,channels):
        """Return the mean channel (i.e the mean power across all channels for a given time point)

        channels -- Channels to calculate the mean over"""
        return pd.Series(self.data(channels=channels).mean(axis=1),index=self.times())

    def set_mean(self,meanCalcChans=None,meanApplyChans=None):
        """Remove a mean value from channels

        meanCalcChans=None : channels to calculate the mean from - mean will be applied to these channels unless meanApplyChans is specified
        meanApplyChans=None : channels to apply the mean to (default is meanCalcChans)"""
        if meanCalcChans is not None:
            logger.info( 'Calculating mean')
            m = self.calc_mean(meanCalcChans)
            permMeanChans = []
            for chan in meanApplyChans if (meanApplyChans is not None) else meanCalcChans:
                logger.debug('Calculating mean for channel '+chan)
                self.__wd[chan] -= m #Cannot use .sub() - blows up!
                permMeanChans.append(chan)
            # NOTE(review): this stores the channels the mean was APPLIED to
            # under __currentMeanCalcChans; __currentMeanApplyChans is never
            # set. Confirm whether that is intended.
            self.__currentMeanCalcChans = permMeanChans
            return m

    def data(self,channels=None):
        """Efficiently get data chunks from disk by supplying a column list (Note:data must be in table format)

        Keyword Arguments:
        channels=None -- The channels to pull from the data (This should be done efficiently but needs to be reviewed)"""
        if channels is not None:
            try:
                #Efficient read from disk (no need to load all data in memory) if data is in table format
                d = self.__signals.select('data', [pd.Term('columns','=',channels)])
            except:
                #If in pytables format then we need to load all data into memory and clip
                d= self.__signals['data'][channels]
        else:
            d= self.__signals['data']
        gc.collect() #Do garbage collection to free up wasted memory
        return d

    def times(self):
        """Returns the timestamps of samples"""
        return self.__signals['times']

    def channels(self):
        """Returns all channel names"""
        return self.__signals['channels']

    def wd(self,channels=None):
        """Return the current working data

        Keyword arguments:
        channels=None -- Channels to pull from working data"""
        if self.__wd is not None:
            return self.__wd[self.wc() if channels is None else channels]
        else:
            # Implicitly returns None when no working data has been set.
            logger.debug( "No working data was set")

    def fs(self):
        """Return the sample rate of the signal"""
        return self.__signals['fs'][0]

    def correct_event_times(self,offsets,new_log_out=False):
        """Correct the event matrix to include the appropriate block offsets

        #Keyword Arguments
        Offsets -- A Pandas Dataframe or Series with ['time'] offsets for each block
        new_log_out=None -- Boolean value if the corrected file is to be output"""
        logger.info( 'Correcting times in log file')
        # NOTE(review): despite the docstring, ``offsets`` is treated as a
        # file path here and parsed with read_csv — confirm the contract.
        offsets = pd.read_csv(offsets,sep='\t')
        blocks = self.blocks()
        startTimes = blocks['pulse.on']
        offsets = pd.Series(offsets['time']-startTimes,index=range(len(offsets))) #Remove the psychopy start time from the offset
        #Correct block times by the offsets
        logger.debug( "\tCorrecting blocks data")
        blocks['pulse.on']+=offsets
        blocks['pulse.off']+=offsets
        self.__signals['blocks'] = blocks
        logger.debug(blocks)
        logger.debug( '\tCorrecting event times')
        em = self.event_matrix()
        # Apply the i-th block's offset to all events in that block.
        for i,block in enumerate(em['Block'].unique()):
            em.ix[em['Block']==block,'pulse.on']+= offsets.ix[i]
            em.ix[em['Block']==block,'pulse.off']+= offsets.ix[i]
        self.__signals['event_matrix'] = em
        self.__flushSignals__()
        if new_log_out:
            logger.info( "Saving corrected log file")
            self.__signals['event_matrix'].to_csv(self.__base_file_name+'_corrected_log.csv')
            self.__log_file = self.__base_file_name+'_corrected_log.csv'

    def set_log_file(self,log,offsets=None,new_log_out=False):
        """Sets the psychopy log file

        Keyword Arguments:
        log -- path to log file
        offsets=None -- path to file contains offsets of block times
        new_log_out=False -- Boolean value if the corrected file is to be output"""
        logger.info( 'Saving log file')
        self.__log_file = log
        self.__create_events_matrix__()
        if offsets:
            self.correct_event_times(offsets,new_log_out)

    def set_wd(self,channels=None,meanCalcChans=None,meanApplyChans=None):
        """Sets the working data to the selected channels (selects all channels by default)

        Keyword Arguments:
        channels=None -- list of channels to use
        meanChans=None -- The channels to calculate the mean from
        meanApplyChans=None - The channels to apply the mean to (default = meanCalcChans)"""
        logger.info( "Loading working data")
        self.__wd = self.data(channels=channels if channels else self.channels())
        self.__wc = channels if channels else self.channels()
        if meanCalcChans is not None:
            self.set_mean(meanCalcChans, meanApplyChans)

    def wc(self):
        """Returns a list of channel names for the working data"""
        return list(self.__wc)

    def set_fs(self,fs):
        """Set the frequency that the data was sampled at

        Keyword Arguments:
        fs -- sample frequency"""
        self.__signals['fs']=fs
        self.__flushSignals__()

    def splice(self,data=None,times=None,indices=None):
        """Returns the signal specified between two time points

        data=None -- The data to splice (default is the whole data set)
        times=None -- The start and end times to splice between
        indices=None -- The start and end indices"""
        if data is None:
            data = self.wd()
        if times:
            # [:-1] drops the final sample so the range is half-open.
            return data.ix[self.snap_time(min(times)):self.snap_time(max(times))].values[:-1]
        elif indices:
            return data.iloc[indices]

    def eventsTimes(self,events,limit=None):
        """Get combined time indices of each event period

        events - Events to index
        limit=None - Specify cut-off for each event in seconds"""
        # Convert the per-event cut-off from seconds to samples.
        limit = None if limit is None else int(limit*self.fs())
        allTimes = np.array([])
        for i in range(len(events)):
            x = self.event_times(event=events.iloc[i])[:limit]
            allTimes = np.hstack([allTimes,x])
        return allTimes

    #Timing functions
    def snap_time(self,t):
        """Finds the nearest time

        Keyword Arguments:
        t - Time to snap to
        """
        # Round down to the sample grid, then convert back to seconds.
        return self.time_to_index(t)/float(self.fs())
        #return self.times()[self.time_to_index(t)]

    def index_to_time(self,ix):
        """Returns the time of a given index

        Keyword Arguments:
        ix - Index of time"""
        return self.times().iloc[ix]

    def time_to_index(self,t):
        """Returns the index of a given time point

        Keyword Arguments:
        t -- Time point"""
        return int(np.floor(t*float(self.fs())))

    def event_times(self,event=None,times=None):
        """Returns a full list of event sample times

        Keyword Arguments:
        event=None -- Event to retrieve time points of
        times=None -- Start and stop times to find times between
        """
        if event is not None:
            [start,stop] = event[['pulse.on','pulse.off']].values
        elif times is not None:
            start = np.min(times)
            stop = np.max(times)
        else:
            logger.debug('No event or time was supplied')
            return None
        return self.times()[self.time_to_index(start):self.time_to_index(stop)]

    def event_data(self,event,chans=None):
        """Returns the data for a given event

        Keyword Arguments:
        event -- Event to get data from
        chans=None -- Channels to get from"""
        # NOTE(review): this method looks unfinished — it only defaults
        # ``chans`` and then falls through, returning None. Confirm the
        # intended body (presumably a splice over the event's time range).
        if chans is None:
            chans = self.wc()

    def num_points(self,event=None,times=None):
        """Returns the number of (inclusive) samples between two data points

        Keyword Arguments:
        event -- Event to get number of points of
        times -- start and stop time to get number of points between"""
        #If fs is specified then use that, otherwise will need to snip a section and check the length
        if event is not None:
            return self.time_to_index(event['pulse.off'])-self.time_to_index(event['pulse.on'])
        elif times is not None:
            return self.time_to_index(max(times))-self.time_to_index(min(times))
        else:
            logger.debug('No event or times were supplied ')
            return None
|
|
from __future__ import unicode_literals
import copy
import datetime
from django.db import models
from django.utils.functional import curry
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
from audit_log.models.fields import LastUserField
from audit_log import settings as local_settings
# Prefer Django's ``timezone.now`` (timezone-aware when USE_TZ is enabled);
# fall back to the naive stdlib ``datetime.datetime.now`` on Django versions
# that don't provide it. The ``assert`` guards against a partially-broken
# import leaving the name unbound.
try:
    from django.utils.timezone import now as datetime_now
    assert datetime_now
except ImportError:
    import datetime
    datetime_now = datetime.datetime.now
class LogEntryObjectDescriptor(object):
    """Descriptor that reconstructs a model instance from a log entry.

    Accessing the attribute copies every model field present on the log
    entry into a fresh instance of ``model``.
    """

    def __init__(self, model):
        self.model = model

    def __get__(self, instance, owner):
        # Gather only the fields the log entry actually carries.
        field_values = {}
        for field in self.model._meta.fields:
            if hasattr(instance, field.attname):
                field_values[field.attname] = getattr(instance, field.attname)
        return self.model(**field_values)
class AuditLogManager(models.Manager):
    """Manager for audit-log entries.

    When bound to a model instance, the queryset is restricted to that
    instance's log rows and per-instance change tracking can be toggled.
    """

    def __init__(self, model, attname, instance = None, ):
        super(AuditLogManager, self).__init__()
        self.model = model
        self.instance = instance
        self.attname = attname
        # Hidden per-instance flag controlling whether changes are tracked;
        # defaults to enabled the first time a manager is bound.
        if instance is not None and not hasattr(instance, '__is_%s_enabled'%attname):
            setattr(instance, '__is_%s_enabled'%attname, True)

    def _require_instance(self):
        # Tracking is a per-instance concept; reject class-level access.
        if self.instance is None:
            raise ValueError("Tracking can only be enabled or disabled "
                             "per model instance, not on a model class")

    def enable_tracking(self):
        self._require_instance()
        setattr(self.instance, '__is_%s_enabled'%self.attname, True)

    def disable_tracking(self):
        self._require_instance()
        setattr(self.instance, '__is_%s_enabled'%self.attname, False)

    def is_tracking_enabled(self):
        # A global kill switch takes precedence over the per-instance flag.
        if local_settings.DISABLE_AUDIT_LOG:
            return False
        self._require_instance()
        return getattr(self.instance, '__is_%s_enabled'%self.attname)

    def get_queryset(self):
        qs = super(AuditLogManager, self).get_queryset()
        if self.instance is None:
            return qs
        # Restrict to log rows whose pk field mirrors the tracked instance.
        return qs.filter(**{self.instance._meta.pk.name: self.instance.pk})
class AuditLogDescriptor(object):
    """Attribute descriptor that hands out audit-log manager objects."""

    def __init__(self, model, manager_class, attname):
        self.model = model
        self.manager_class = manager_class
        self.attname = attname

    def __get__(self, instance, owner):
        # Class-level access yields an unscoped manager; instance access
        # yields a manager restricted to that instance's trail.
        if instance is not None:
            return self.manager_class(self.model, self.attname, instance)
        return self.manager_class(self.model, self.attname)
class AuditLog(object):
    """Model attribute that records every save/delete into a generated log model.

    Assign ``audit_log = AuditLog()`` on a model. Once the model class is
    prepared, a ``<Model>AuditLogEntry`` model is generated and post-save /
    post-delete signal handlers keep it up to date.
    """
    manager_class = AuditLogManager

    def __init__(self, exclude=None):
        """``exclude``: iterable of field names to leave out of the log model.

        BUGFIX: previously used a mutable default argument (``exclude=[]``);
        the list was only read, but the ``None`` sentinel is the safe idiom.
        """
        self._exclude = list(exclude) if exclude else []

    def contribute_to_class(self, cls, name):
        # Defer log-model creation until the tracked model is fully prepared.
        self.manager_name = name
        models.signals.class_prepared.connect(self.finalize, sender=cls)

    def create_log_entry(self, instance, action_type):
        """Snapshot ``instance`` into its audit trail with the given action."""
        manager = getattr(instance, self.manager_name)
        attrs = {}
        for field in instance._meta.fields:
            if field.attname not in self._exclude:
                attrs[field.attname] = getattr(instance, field.attname)
        manager.create(action_type=action_type, **attrs)

    def post_save(self, instance, created, **kwargs):
        # Ignore instances that have tracking disabled.
        if getattr(instance, self.manager_name).is_tracking_enabled():
            self.create_log_entry(instance, 'I' if created else 'U')

    def post_delete(self, instance, **kwargs):
        # Ignore instances that have tracking disabled.
        if getattr(instance, self.manager_name).is_tracking_enabled():
            self.create_log_entry(instance, 'D')

    def finalize(self, sender, **kwargs):
        """Create the log model and wire up the signals for ``sender``."""
        log_entry_model = self.create_log_entry_model(sender)
        models.signals.post_save.connect(self.post_save, sender=sender, weak=False)
        models.signals.post_delete.connect(self.post_delete, sender=sender, weak=False)
        descriptor = AuditLogDescriptor(log_entry_model, self.manager_class, self.manager_name)
        setattr(sender, self.manager_name, descriptor)

    def copy_fields(self, model):
        """
        Creates copies of the fields we are keeping
        track of for the provided model, returning a
        dictionary mapping field name to a copied field object.
        """
        fields = {'__module__': model.__module__}
        for field in model._meta.fields:
            if field.name not in self._exclude:
                field = copy.deepcopy(field)
                if isinstance(field, models.AutoField):
                    # we replace the AutoField of the original model
                    # with an IntegerField because a model can
                    # have only one autofield.
                    field.__class__ = models.IntegerField
                if field.primary_key:
                    field.serialize = True
                # OneToOne fields should really be tracked
                # as ForeignKey fields
                if isinstance(field, models.OneToOneField):
                    field.__class__ = models.ForeignKey
                if field.primary_key or field.unique:
                    # unique fields of the original model
                    # can not be guaranteed to be unique
                    # in the audit log entry but they
                    # should still be indexed for faster lookups.
                    field.primary_key = False
                    field._unique = False
                    field.db_index = True
                # Rename reverse accessors so they don't clash with the
                # tracked model's own relations.
                if field.remote_field and field.remote_field.related_name:
                    field.remote_field.related_name = '_auditlog_{}_{}'.format(
                        model._meta.model_name,
                        field.remote_field.related_name
                    )
                elif field.remote_field:
                    try:
                        if field.remote_field.get_accessor_name():
                            field.remote_field.related_name = '_auditlog_{}_{}'.format(
                                model._meta.model_name,
                                field.remote_field.get_accessor_name()
                            )
                    # BUGFIX: was ``except e:`` — ``e`` is undefined there, so
                    # any error raised a NameError instead of being swallowed.
                    except Exception:
                        pass
                fields[field.name] = field
        return fields

    def get_logging_fields(self, model):
        """
        Returns a dictionary mapping of the fields that are used for
        keeping the actual audit log entries.
        """
        rel_name = '_%s_audit_log_entry' % model._meta.object_name.lower()

        def entry_instance_to_unicode(log_entry):
            # Human-readable representation of a single log entry.
            try:
                result = '%s: %s %s at %s' % (model._meta.object_name,
                                              log_entry.object_state,
                                              log_entry.get_action_type_display().lower(),
                                              log_entry.action_date,
                                              )
            except AttributeError:
                result = '%s %s at %s' % (model._meta.object_name,
                                          log_entry.get_action_type_display().lower(),
                                          log_entry.action_date
                                          )
            return result

        action_user_field = LastUserField(related_name=rel_name, editable=False)
        # check if the manager has been attached to auth user model
        if [model._meta.app_label, model.__name__] == getattr(settings, 'AUTH_USER_MODEL', 'auth.User').split("."):
            action_user_field = LastUserField(related_name=rel_name, editable=False, to='self')
        return {
            'action_id': models.AutoField(primary_key=True),
            'action_date': models.DateTimeField(default=datetime_now, editable=False, blank=False),
            'action_user': action_user_field,
            'action_type': models.CharField(max_length=1, editable=False, choices=(
                ('I', _('Created')),
                ('U', _('Changed')),
                ('D', _('Deleted')),
            )),
            'object_state': LogEntryObjectDescriptor(model),
            '__unicode__': entry_instance_to_unicode,
        }

    def get_meta_options(self, model):
        """
        Returns a dictionary of Meta options for the
        audit log model.
        """
        result = {
            'ordering': ('-action_date',),
            'app_label': model._meta.app_label,
        }
        from django.db.models.options import DEFAULT_NAMES
        # Older Django versions don't know 'default_permissions'.
        if 'default_permissions' in DEFAULT_NAMES:
            result.update({'default_permissions': ()})
        return result

    def create_log_entry_model(self, model):
        """
        Creates a log entry model that will be associated with
        the model provided.
        """
        attrs = self.copy_fields(model)
        attrs.update(self.get_logging_fields(model))
        attrs.update(Meta=type(str('Meta'), (), self.get_meta_options(model)))
        name = str('%sAuditLogEntry' % model._meta.object_name)
        return type(name, (models.Model,), attrs)
|
|
# encoding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
import datetime
import re
import warnings
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models.loading import get_model
from django.utils import six
import haystack
from haystack.backends import BaseEngine, BaseSearchBackend, BaseSearchQuery, log_query
from haystack.constants import DEFAULT_OPERATOR, DJANGO_CT, DJANGO_ID, ID
from haystack.exceptions import MissingDependency, MoreLikeThisError
from haystack.inputs import Clean, Exact, PythonData, Raw
from haystack.models import SearchResult
from haystack.utils import log as logging
from haystack.utils import get_identifier, get_model_ct
try:
import elasticsearch
from elasticsearch.helpers import bulk_index
from elasticsearch.exceptions import NotFoundError
except ImportError:
raise MissingDependency("The 'elasticsearch' backend requires the installation of 'elasticsearch'. Please refer to the documentation.")
# Matches ISO-8601 timestamps as returned by Elasticsearch, e.g.
# ``2016-09-01T12:34:56`` with an optional fractional-second suffix
# (the fraction is matched but deliberately not captured by name).
DATETIME_REGEX = re.compile(
    r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})T'
    r'(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2})(\.\d+)?$')
class ElasticsearchSearchBackend(BaseSearchBackend):
    """Haystack search backend talking to Elasticsearch via ``elasticsearch-py``.

    The class-level constants list Lucene syntax reserved by Elasticsearch
    (used for escaping queries) and the default index settings providing
    n-gram / edge n-gram analyzers for autocomplete-style fields.
    """
    # Word reserved by Elasticsearch for special use.
    RESERVED_WORDS = (
        'AND',
        'NOT',
        'OR',
        'TO',
    )

    # Characters reserved by Elasticsearch for special use.
    # The '\\' must come first, so as not to overwrite the other slash replacements.
    RESERVED_CHARACTERS = (
        '\\', '+', '-', '&&', '||', '!', '(', ')', '{', '}',
        '[', ']', '^', '"', '~', '*', '?', ':', '/',
    )

    # Settings to add an n-gram & edge n-gram analyzer.
    DEFAULT_SETTINGS = {
        'settings': {
            "analysis": {
                "analyzer": {
                    "ngram_analyzer": {
                        "type": "custom",
                        "tokenizer": "standard",
                        "filter": ["haystack_ngram", "lowercase"]
                    },
                    "edgengram_analyzer": {
                        "type": "custom",
                        "tokenizer": "standard",
                        "filter": ["haystack_edgengram", "lowercase"]
                    }
                },
                "tokenizer": {
                    "haystack_ngram_tokenizer": {
                        "type": "nGram",
                        "min_gram": 3,
                        "max_gram": 15,
                    },
                    "haystack_edgengram_tokenizer": {
                        "type": "edgeNGram",
                        "min_gram": 2,
                        "max_gram": 15,
                        "side": "front"
                    }
                },
                "filter": {
                    "haystack_ngram": {
                        "type": "nGram",
                        "min_gram": 3,
                        "max_gram": 15
                    },
                    "haystack_edgengram": {
                        "type": "edgeNGram",
                        "min_gram": 2,
                        "max_gram": 15
                    }
                }
            }
        }
    }
def __init__(self, connection_alias, **connection_options):
super(ElasticsearchSearchBackend, self).__init__(connection_alias, **connection_options)
if not 'URL' in connection_options:
raise ImproperlyConfigured("You must specify a 'URL' in your settings for connection '%s'." % connection_alias)
if not 'INDEX_NAME' in connection_options:
raise ImproperlyConfigured("You must specify a 'INDEX_NAME' in your settings for connection '%s'." % connection_alias)
self.conn = elasticsearch.Elasticsearch(connection_options['URL'], timeout=self.timeout, **connection_options.get('KWARGS', {}))
self.index_name = connection_options['INDEX_NAME']
self.log = logging.getLogger('haystack')
self.setup_complete = False
self.existing_mapping = {}
    def setup(self):
        """
        Defers loading until needed.
        """
        # Get the existing mapping & cache it. We'll compare it
        # during the ``update`` & if it doesn't match, we'll put the new
        # mapping.
        try:
            self.existing_mapping = self.conn.indices.get_mapping(index=self.index_name)
        except NotFoundError:
            # Index doesn't exist yet; it is created below.
            pass
        except Exception:
            if not self.silently_fail:
                raise

        unified_index = haystack.connections[self.connection_alias].get_unified_index()
        self.content_field_name, field_mapping = self.build_schema(unified_index.all_searchfields())
        current_mapping = {
            'modelresult': {
                'properties': field_mapping,
                '_boost': {
                    'name': 'boost',
                    'null_value': 1.0
                }
            }
        }

        if current_mapping != self.existing_mapping:
            try:
                # Make sure the index is there first.
                # ``ignore=400`` tolerates an already-existing index.
                self.conn.indices.create(index=self.index_name, body=self.DEFAULT_SETTINGS, ignore=400)
                self.conn.indices.put_mapping(index=self.index_name, doc_type='modelresult', body=current_mapping)
                self.existing_mapping = current_mapping
            except Exception:
                if not self.silently_fail:
                    raise

        self.setup_complete = True
    def update(self, index, iterable, commit=True):
        """Bulk-index every object in ``iterable`` using ``index``'s prepared data."""
        if not self.setup_complete:
            try:
                self.setup()
            except elasticsearch.TransportError as e:
                if not self.silently_fail:
                    raise

                self.log.error("Failed to add documents to Elasticsearch: %s", e)
                return

        prepped_docs = []

        for obj in iterable:
            try:
                prepped_data = index.full_prepare(obj)
                final_data = {}

                # Convert the data to make sure it's happy.
                for key, value in prepped_data.items():
                    final_data[key] = self._from_python(value)

                # The bulk helper reads the document id from '_id'.
                final_data['_id'] = final_data[ID]
                prepped_docs.append(final_data)
            except elasticsearch.TransportError as e:
                if not self.silently_fail:
                    raise

                # We'll log the object identifier but won't include the actual object
                # to avoid the possibility of that generating encoding errors while
                # processing the log message:
                self.log.error(u"%s while preparing object for update" % e.__class__.__name__, exc_info=True, extra={
                    "data": {
                        "index": index,
                        "object": get_identifier(obj)
                    }
                })

        bulk_index(self.conn, prepped_docs, index=self.index_name, doc_type='modelresult')

        if commit:
            self.conn.indices.refresh(index=self.index_name)
    def remove(self, obj_or_string, commit=True):
        """Delete a single document (identified by object or identifier string)."""
        doc_id = get_identifier(obj_or_string)

        if not self.setup_complete:
            try:
                self.setup()
            except elasticsearch.TransportError as e:
                if not self.silently_fail:
                    raise

                self.log.error("Failed to remove document '%s' from Elasticsearch: %s", doc_id, e)
                return

        try:
            # ``ignore=404`` makes deleting a missing document a no-op.
            self.conn.delete(index=self.index_name, doc_type='modelresult', id=doc_id, ignore=404)

            if commit:
                self.conn.indices.refresh(index=self.index_name)
        except elasticsearch.TransportError as e:
            if not self.silently_fail:
                raise

            self.log.error("Failed to remove document '%s' from Elasticsearch: %s", doc_id, e)
def clear(self, models=[], commit=True):
# We actually don't want to do this here, as mappings could be
# very different.
# if not self.setup_complete:
# self.setup()
try:
if not models:
self.conn.indices.delete(index=self.index_name, ignore=404)
self.setup_complete = False
self.existing_mapping = {}
else:
models_to_delete = []
for model in models:
models_to_delete.append("%s:%s" % (DJANGO_CT, get_model_ct(model)))
# Delete by query in Elasticsearch asssumes you're dealing with
# a ``query`` root object. :/
query = {'query': {'query_string': {'query': " OR ".join(models_to_delete)}}}
self.conn.delete_by_query(index=self.index_name, doc_type='modelresult', body=query)
except elasticsearch.TransportError as e:
if not self.silently_fail:
raise
if len(models):
self.log.error("Failed to clear Elasticsearch index of models '%s': %s", ','.join(models_to_delete), e)
else:
self.log.error("Failed to clear Elasticsearch index: %s", e)
    def build_search_kwargs(self, query_string, sort_by=None, start_offset=0, end_offset=None,
                            fields='', highlight=False, facets=None,
                            date_facets=None, query_facets=None,
                            narrow_queries=None, spelling_query=None,
                            within=None, dwithin=None, distance_point=None,
                            models=None, limit_to_registered_models=None,
                            result_class=None):
        """Translate haystack search options into an Elasticsearch request body dict."""
        index = haystack.connections[self.connection_alias].get_unified_index()
        content_field = index.document_field

        if query_string == '*:*':
            # "Match everything" short-circuit.
            kwargs = {
                'query': {
                    "match_all": {}
                },
            }
        else:
            kwargs = {
                'query': {
                    'query_string': {
                        'default_field': content_field,
                        'default_operator': DEFAULT_OPERATOR,
                        'query': query_string,
                        'analyze_wildcard': True,
                        'auto_generate_phrase_queries': True,
                    },
                },
            }

        # so far, no filters
        filters = []

        if fields:
            if isinstance(fields, (list, set)):
                fields = " ".join(fields)

            kwargs['fields'] = fields

        if sort_by is not None:
            order_list = []
            for field, direction in sort_by:
                if field == 'distance' and distance_point:
                    # Do the geo-enabled sort.
                    lng, lat = distance_point['point'].get_coords()
                    sort_kwargs = {
                        "_geo_distance": {
                            distance_point['field']: [lng, lat],
                            "order": direction,
                            "unit": "km"
                        }
                    }
                else:
                    if field == 'distance':
                        warnings.warn("In order to sort by distance, you must call the '.distance(...)' method.")

                    # Regular sorting.
                    sort_kwargs = {field: {'order': direction}}

                order_list.append(sort_kwargs)

            kwargs['sort'] = order_list

        # From/size offsets don't seem to work right in Elasticsearch's DSL. :/
        # if start_offset is not None:
        #     kwargs['from'] = start_offset

        # if end_offset is not None:
        #     kwargs['size'] = end_offset - start_offset

        if highlight is True:
            kwargs['highlight'] = {
                'fields': {
                    content_field: {'store': 'yes'},
                }
            }

        if self.include_spelling:
            kwargs['suggest'] = {
                'suggest': {
                    'text': spelling_query or query_string,
                    'term': {
                        # Using content_field here will result in suggestions of stemmed words.
                        'field': '_all',
                    },
                },
            }

        if narrow_queries is None:
            narrow_queries = set()

        if facets is not None:
            kwargs.setdefault('facets', {})

            for facet_fieldname, extra_options in facets.items():
                facet_options = {
                    'terms': {
                        'field': facet_fieldname,
                        'size': 100,
                    },
                }
                # Special cases for options applied at the facet level (not the terms level).
                if extra_options.pop('global_scope', False):
                    # Renamed "global_scope" since "global" is a python keyword.
                    facet_options['global'] = True
                if 'facet_filter' in extra_options:
                    facet_options['facet_filter'] = extra_options.pop('facet_filter')
                facet_options['terms'].update(extra_options)
                kwargs['facets'][facet_fieldname] = facet_options

        if date_facets is not None:
            kwargs.setdefault('facets', {})

            for facet_fieldname, value in date_facets.items():
                # Need to detect on gap_by & only add amount if it's more than one.
                interval = value.get('gap_by').lower()

                # Need to detect on amount (can't be applied on months or years).
                if value.get('gap_amount', 1) != 1 and interval not in ('month', 'year'):
                    # Just the first character is valid for use.
                    interval = "%s%s" % (value['gap_amount'], interval[:1])

                kwargs['facets'][facet_fieldname] = {
                    'date_histogram': {
                        'field': facet_fieldname,
                        'interval': interval,
                    },
                    'facet_filter': {
                        "range": {
                            facet_fieldname: {
                                'from': self._from_python(value.get('start_date')),
                                'to': self._from_python(value.get('end_date')),
                            }
                        }
                    }
                }

        if query_facets is not None:
            kwargs.setdefault('facets', {})

            for facet_fieldname, value in query_facets:
                kwargs['facets'][facet_fieldname] = {
                    'query': {
                        'query_string': {
                            'query': value,
                        }
                    },
                }

        if limit_to_registered_models is None:
            limit_to_registered_models = getattr(settings, 'HAYSTACK_LIMIT_TO_REGISTERED_MODELS', True)

        if models and len(models):
            model_choices = sorted(get_model_ct(model) for model in models)
        elif limit_to_registered_models:
            # Using narrow queries, limit the results to only models handled
            # with the current routers.
            model_choices = self.build_models_list()
        else:
            model_choices = []

        if len(model_choices) > 0:
            filters.append({"terms": {DJANGO_CT: model_choices}})

        for q in narrow_queries:
            filters.append({
                'fquery': {
                    'query': {
                        'query_string': {
                            'query': q
                        },
                    },
                    '_cache': True,
                }
            })

        if within is not None:
            from haystack.utils.geo import generate_bounding_box

            ((south, west), (north, east)) = generate_bounding_box(within['point_1'], within['point_2'])
            within_filter = {
                "geo_bounding_box": {
                    within['field']: {
                        "top_left": {
                            "lat": north,
                            "lon": west
                        },
                        "bottom_right": {
                            "lat": south,
                            "lon": east
                        }
                    }
                },
            }
            filters.append(within_filter)

        if dwithin is not None:
            lng, lat = dwithin['point'].get_coords()

            # NB: the 1.0.0 release of elasticsearch introduce an
            #     incompatible change on the distance filter formating
            if elasticsearch.VERSION >= (1, 0, 0):
                distance = "%(dist).6f%(unit)s" % {
                    'dist': dwithin['distance'].km,
                    'unit': "km"
                }
            else:
                distance = dwithin['distance'].km

            dwithin_filter = {
                "geo_distance": {
                    "distance": distance,
                    dwithin['field']: {
                        "lat": lat,
                        "lon": lng
                    }
                }
            }
            filters.append(dwithin_filter)

        # if we want to filter, change the query type to filtered
        if filters:
            kwargs["query"] = {"filtered": {"query": kwargs.pop("query")}}
            if len(filters) == 1:
                kwargs['query']['filtered']["filter"] = filters[0]
            else:
                kwargs['query']['filtered']["filter"] = {"bool": {"must": filters}}

        return kwargs
    @log_query
    def search(self, query_string, **kwargs):
        """Run ``query_string`` against the index and return processed results."""
        if len(query_string) == 0:
            return {
                'results': [],
                'hits': 0,
            }

        if not self.setup_complete:
            self.setup()

        search_kwargs = self.build_search_kwargs(query_string, **kwargs)
        search_kwargs['from'] = kwargs.get('start_offset', 0)

        order_fields = set()
        for order in search_kwargs.get('sort', []):
            for key in order.keys():
                order_fields.add(key)

        # Geo-sorted responses carry the computed distance in the raw
        # ``sort`` values; remember that for result processing.
        geo_sort = '_geo_distance' in order_fields

        end_offset = kwargs.get('end_offset')
        start_offset = kwargs.get('start_offset', 0)
        if end_offset is not None and end_offset > start_offset:
            search_kwargs['size'] = end_offset - start_offset

        try:
            raw_results = self.conn.search(body=search_kwargs,
                                           index=self.index_name,
                                           doc_type='modelresult',
                                           _source=True)
        except elasticsearch.TransportError as e:
            if not self.silently_fail:
                raise

            self.log.error("Failed to query Elasticsearch using '%s': %s", query_string, e)
            raw_results = {}

        return self._process_results(raw_results,
                                     highlight=kwargs.get('highlight'),
                                     result_class=kwargs.get('result_class', SearchResult),
                                     distance_point=kwargs.get('distance_point'),
                                     geo_sort=geo_sort)
    def more_like_this(self, model_instance, additional_query_string=None,
                       start_offset=0, end_offset=None, models=None,
                       limit_to_registered_models=None, result_class=None, **kwargs):
        """Return documents similar to ``model_instance`` via the ES MLT API."""
        from haystack import connections

        if not self.setup_complete:
            self.setup()

        # Deferred models will have a different class ("RealClass_Deferred_fieldname")
        # which won't be in our registry:
        model_klass = model_instance._meta.concrete_model

        index = connections[self.connection_alias].get_unified_index().get_index(model_klass)
        field_name = index.get_content_field()
        params = {}

        if start_offset is not None:
            params['search_from'] = start_offset

        if end_offset is not None:
            params['search_size'] = end_offset - start_offset

        doc_id = get_identifier(model_instance)

        try:
            raw_results = self.conn.mlt(index=self.index_name, doc_type='modelresult', id=doc_id, mlt_fields=[field_name], **params)
        except elasticsearch.TransportError as e:
            if not self.silently_fail:
                raise

            self.log.error("Failed to fetch More Like This from Elasticsearch for document '%s': %s", doc_id, e)
            raw_results = {}

        return self._process_results(raw_results, result_class=result_class)
    def _process_results(self, raw_results, highlight=False,
                         result_class=None, distance_point=None,
                         geo_sort=False):
        """Convert a raw ES response dict into haystack's results structure."""
        from haystack import connections
        results = []
        hits = raw_results.get('hits', {}).get('total', 0)
        facets = {}
        spelling_suggestion = None

        if result_class is None:
            result_class = SearchResult

        if self.include_spelling and 'suggest' in raw_results:
            raw_suggest = raw_results['suggest'].get('suggest')
            if raw_suggest:
                # Keep the original word when ES offers no alternative.
                spelling_suggestion = ' '.join([word['text'] if len(word['options']) == 0 else word['options'][0]['text'] for word in raw_suggest])

        if 'facets' in raw_results:
            facets = {
                'fields': {},
                'dates': {},
                'queries': {},
            }

            for facet_fieldname, facet_info in raw_results['facets'].items():
                if facet_info.get('_type', 'terms') == 'terms':
                    facets['fields'][facet_fieldname] = [(individual['term'], individual['count']) for individual in facet_info['terms']]
                elif facet_info.get('_type', 'terms') == 'date_histogram':
                    # Elasticsearch provides UTC timestamps with an extra three
                    # decimals of precision, which datetime barfs on.
                    facets['dates'][facet_fieldname] = [(datetime.datetime.utcfromtimestamp(individual['time'] / 1000), individual['count']) for individual in facet_info['entries']]
                elif facet_info.get('_type', 'terms') == 'query':
                    facets['queries'][facet_fieldname] = facet_info['count']

        unified_index = connections[self.connection_alias].get_unified_index()
        indexed_models = unified_index.get_indexed_models()
        content_field = unified_index.document_field

        for raw_result in raw_results.get('hits', {}).get('hits', []):
            source = raw_result['_source']
            app_label, model_name = source[DJANGO_CT].split('.')
            additional_fields = {}
            model = get_model(app_label, model_name)

            if model and model in indexed_models:
                for key, value in source.items():
                    index = unified_index.get_index(model)
                    string_key = str(key)

                    if string_key in index.fields and hasattr(index.fields[string_key], 'convert'):
                        additional_fields[string_key] = index.fields[string_key].convert(value)
                    else:
                        additional_fields[string_key] = self._to_python(value)

                # Internal bookkeeping fields must not leak into results.
                del(additional_fields[DJANGO_CT])
                del(additional_fields[DJANGO_ID])

                if 'highlight' in raw_result:
                    additional_fields['highlighted'] = raw_result['highlight'].get(content_field, '')

                if distance_point:
                    additional_fields['_point_of_origin'] = distance_point

                    if geo_sort and raw_result.get('sort'):
                        from haystack.utils.geo import Distance
                        additional_fields['_distance'] = Distance(km=float(raw_result['sort'][0]))
                    else:
                        additional_fields['_distance'] = None

                result = result_class(app_label, model_name, source[DJANGO_ID], raw_result['_score'], **additional_fields)
                results.append(result)
            else:
                # Hit belongs to a model we no longer index; drop it.
                hits -= 1

        return {
            'results': results,
            'hits': hits,
            'facets': facets,
            'spelling_suggestion': spelling_suggestion,
        }
    def build_schema(self, fields):
        """Build the ES field mapping for ``fields``.

        Returns ``(content_field_name, mapping)`` where ``content_field_name``
        is the name of the primary document field.
        """
        content_field_name = ''
        mapping = {
            DJANGO_CT: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
            DJANGO_ID: {'type': 'string', 'index': 'not_analyzed', 'include_in_all': False},
        }

        for field_name, field_class in fields.items():
            # Copy so per-field tweaks don't mutate the shared defaults.
            field_mapping = FIELD_MAPPINGS.get(field_class.field_type, DEFAULT_FIELD_MAPPING).copy()
            if field_class.boost != 1.0:
                field_mapping['boost'] = field_class.boost

            if field_class.document is True:
                content_field_name = field_class.index_fieldname

            # Do this last to override `text` fields.
            if field_mapping['type'] == 'string':
                if field_class.indexed is False or hasattr(field_class, 'facet_for'):
                    field_mapping['index'] = 'not_analyzed'
                    del field_mapping['analyzer']

            mapping[field_class.index_fieldname] = field_mapping

        return (content_field_name, mapping)
def _iso_datetime(self, value):
"""
If value appears to be something datetime-like, return it in ISO format.
Otherwise, return None.
"""
if hasattr(value, 'strftime'):
if hasattr(value, 'hour'):
return value.isoformat()
else:
return '%sT00:00:00' % value.isoformat()
    def _from_python(self, value):
        """Convert more Python data types to ES-understandable JSON."""
        iso = self._iso_datetime(value)
        if iso:
            return iso
        elif isinstance(value, six.binary_type):
            # TODO: Be stricter.
            return six.text_type(value, errors='replace')
        elif isinstance(value, set):
            # Sets aren't JSON-serializable; lists are.
            return list(value)
        return value
    def _to_python(self, value):
        """Convert values from ElasticSearch to native Python values."""
        if isinstance(value, (int, float, complex, list, tuple, bool)):
            return value

        if isinstance(value, six.string_types):
            possible_datetime = DATETIME_REGEX.search(value)

            if possible_datetime:
                date_values = possible_datetime.groupdict()

                for dk, dv in date_values.items():
                    date_values[dk] = int(dv)

                return datetime.datetime(
                    date_values['year'], date_values['month'],
                    date_values['day'], date_values['hour'],
                    date_values['minute'], date_values['second'])

        try:
            # This is slightly gross but it's hard to tell otherwise what the
            # string's original type might have been. Be careful who you trust.
            # SECURITY NOTE(review): ``eval`` executes arbitrary expressions; if
            # index contents can ever be attacker-controlled this should be
            # ``ast.literal_eval`` instead — confirm before changing.
            converted_value = eval(value)

            # Try to handle most built-in types.
            if isinstance(
                    converted_value,
                    (int, list, tuple, set, dict, float, complex)):
                return converted_value
        except Exception:
            # If it fails (SyntaxError or its ilk) or we don't trust it,
            # continue on.
            pass

        return value
# DRL_FIXME: Perhaps move to something where, if none of these
# match, call a custom method on the form that returns, per-backend,
# the right type of storage?
# Fallback ES mapping used for any haystack field type not listed below.
DEFAULT_FIELD_MAPPING = {'type': 'string', 'analyzer': 'snowball'}
# Haystack field type -> Elasticsearch mapping definition.
FIELD_MAPPINGS = {
    'edge_ngram': {'type': 'string', 'analyzer': 'edgengram_analyzer'},
    'ngram': {'type': 'string', 'analyzer': 'ngram_analyzer'},
    'date': {'type': 'date'},
    'datetime': {'type': 'date'},

    'location': {'type': 'geo_point'},
    'boolean': {'type': 'boolean'},
    'float': {'type': 'float'},
    'long': {'type': 'long'},
    'integer': {'type': 'long'},
}
# Sucks that this is almost an exact copy of what's in the Solr backend,
# but we can't import due to dependencies.
class ElasticsearchSearchQuery(BaseSearchQuery):
    """Translates haystack ``SearchQuery`` state into Lucene query strings."""

    def matching_all_fragment(self):
        # Lucene syntax for "match every document".
        return '*:*'
    def build_query_fragment(self, field, filter_type, value):
        """Build one ``field:value`` Lucene fragment for the given filter type."""
        from haystack import connections
        query_frag = ''

        if not hasattr(value, 'input_type_name'):
            # Handle when we've got a ``ValuesListQuerySet``...
            if hasattr(value, 'values_list'):
                value = list(value)

            if isinstance(value, six.string_types):
                # It's not an ``InputType``. Assume ``Clean``.
                value = Clean(value)
            else:
                value = PythonData(value)

        # Prepare the query using the InputType.
        prepared_value = value.prepare(self)

        if not isinstance(prepared_value, (set, list, tuple)):
            # Then convert whatever we get back to what pysolr wants if needed.
            prepared_value = self.backend._from_python(prepared_value)

        # 'content' is a special reserved word, much like 'pk' in
        # Django's ORM layer. It indicates 'no special field'.
        if field == 'content':
            index_fieldname = ''
        else:
            index_fieldname = u'%s:' % connections[self._using].get_unified_index().get_index_fieldname(field)

        # Lucene templates keyed by haystack filter type.
        filter_types = {
            'contains': u'%s',
            'startswith': u'%s*',
            'exact': u'%s',
            'gt': u'{%s TO *}',
            'gte': u'[%s TO *]',
            'lt': u'{* TO %s}',
            'lte': u'[* TO %s]',
        }

        if value.post_process is False:
            query_frag = prepared_value
        else:
            if filter_type in ['contains', 'startswith']:
                if value.input_type_name == 'exact':
                    query_frag = prepared_value
                else:
                    # Iterate over terms & incorportate the converted form of each into the query.
                    terms = []

                    if isinstance(prepared_value, six.string_types):
                        for possible_value in prepared_value.split(' '):
                            terms.append(filter_types[filter_type] % self.backend._from_python(possible_value))
                    else:
                        terms.append(filter_types[filter_type] % self.backend._from_python(prepared_value))

                    if len(terms) == 1:
                        query_frag = terms[0]
                    else:
                        query_frag = u"(%s)" % " AND ".join(terms)
            elif filter_type == 'in':
                in_options = []

                for possible_value in prepared_value:
                    in_options.append(u'"%s"' % self.backend._from_python(possible_value))

                query_frag = u"(%s)" % " OR ".join(in_options)
            elif filter_type == 'range':
                start = self.backend._from_python(prepared_value[0])
                end = self.backend._from_python(prepared_value[1])
                query_frag = u'["%s" TO "%s"]' % (start, end)
            elif filter_type == 'exact':
                if value.input_type_name == 'exact':
                    query_frag = prepared_value
                else:
                    prepared_value = Exact(prepared_value).prepare(self)
                    query_frag = filter_types[filter_type] % prepared_value
            else:
                if value.input_type_name != 'exact':
                    prepared_value = Exact(prepared_value).prepare(self)

                query_frag = filter_types[filter_type] % prepared_value

        if len(query_frag) and not isinstance(value, Raw):
            # Parenthesize unless the fragment is already grouped.
            if not query_frag.startswith('(') and not query_frag.endswith(')'):
                query_frag = "(%s)" % query_frag

        return u"%s%s" % (index_fieldname, query_frag)
def build_alt_parser_query(self, parser_name, query_string='', **kwargs):
if query_string:
kwargs['v'] = query_string
kwarg_bits = []
for key in sorted(kwargs.keys()):
if isinstance(kwargs[key], six.string_types) and ' ' in kwargs[key]:
kwarg_bits.append(u"%s='%s'" % (key, kwargs[key]))
else:
kwarg_bits.append(u"%s=%s" % (key, kwargs[key]))
return u"{!%s %s}" % (parser_name, ' '.join(kwarg_bits))
def build_params(self, spelling_query=None, **kwargs):
search_kwargs = {
'start_offset': self.start_offset,
'result_class': self.result_class
}
order_by_list = None
if self.order_by:
if order_by_list is None:
order_by_list = []
for field in self.order_by:
direction = 'asc'
if field.startswith('-'):
direction = 'desc'
field = field[1:]
order_by_list.append((field, direction))
search_kwargs['sort_by'] = order_by_list
if self.date_facets:
search_kwargs['date_facets'] = self.date_facets
if self.distance_point:
search_kwargs['distance_point'] = self.distance_point
if self.dwithin:
search_kwargs['dwithin'] = self.dwithin
if self.end_offset is not None:
search_kwargs['end_offset'] = self.end_offset
if self.facets:
search_kwargs['facets'] = self.facets
if self.fields:
search_kwargs['fields'] = self.fields
if self.highlight:
search_kwargs['highlight'] = self.highlight
if self.models:
search_kwargs['models'] = self.models
if self.narrow_queries:
search_kwargs['narrow_queries'] = self.narrow_queries
if self.query_facets:
search_kwargs['query_facets'] = self.query_facets
if self.within:
search_kwargs['within'] = self.within
if spelling_query:
search_kwargs['spelling_query'] = spelling_query
return search_kwargs
    def run(self, spelling_query=None, **kwargs):
        """Builds and executes the query. Returns a list of search results."""
        final_query = self.build_query()
        search_kwargs = self.build_params(spelling_query, **kwargs)

        # Caller-supplied kwargs override anything derived from query state.
        if kwargs:
            search_kwargs.update(kwargs)

        results = self.backend.search(final_query, **search_kwargs)
        self._results = results.get('results', [])
        self._hit_count = results.get('hits', 0)
        self._facet_counts = self.post_process_facets(results)
        self._spelling_suggestion = results.get('spelling_suggestion', None)
    def run_mlt(self, **kwargs):
        """Builds and executes the query. Returns a list of search results."""
        if self._more_like_this is False or self._mlt_instance is None:
            raise MoreLikeThisError("No instance was provided to determine 'More Like This' results.")

        additional_query_string = self.build_query()
        search_kwargs = {
            'start_offset': self.start_offset,
            'result_class': self.result_class,
            'models': self.models
        }

        if self.end_offset is not None:
            # The MLT API takes a size, not an absolute end offset.
            search_kwargs['end_offset'] = self.end_offset - self.start_offset

        results = self.backend.more_like_this(self._mlt_instance, additional_query_string, **search_kwargs)
        self._results = results.get('results', [])
        self._hit_count = results.get('hits', 0)
class ElasticsearchSearchEngine(BaseEngine):
    """Haystack engine wiring the Elasticsearch backend and query classes together."""
    backend = ElasticsearchSearchBackend
    query = ElasticsearchSearchQuery
|
|
# Author: Nicolas Boulanger-Lewandowski
# University of Montreal (2012)
# RNN-RBM deep learning tutorial
# More information at http://deeplearning.net/tutorial/rnnrbm.html
import glob
import os
import sys
import numpy
# pylab is optional: only plotting functionality depends on it.
try:
    import pylab
except ImportError:
    # BUGFIX: the second message previously used a Python 2 ``print``
    # *statement*, which is a syntax error under Python 3 and inconsistent
    # with the parenthesized call just above it.
    print (
        "pylab isn't available. If you use its functionality, it will crash."
    )
    print("It can be installed with 'pip install -q Pillow'")
from midi.utils import midiread, midiwrite
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
# Don't use a python long as this don't work on 32 bits computers.
# Seed numpy's RNG for reproducibility, then derive the Theano stream seed
# from it so symbolic sampling is reproducible too.
numpy.random.seed(0xbeef)
rng = RandomStreams(seed=numpy.random.randint(1 << 30))
# Silence a known (harmless for this tutorial) scan/subtensor warning.
theano.config.warn.subtensor_merge_bug = False
def build_rbm(v, W, bv, bh, k):
    '''Construct a k-step Gibbs chain starting at v for an RBM.
    v : Theano vector or matrix
        If a matrix, multiple chains will be run in parallel (batch).
    W : Theano matrix
        Weight matrix of the RBM.
    bv : Theano vector
        Visible bias vector of the RBM.
    bh : Theano vector
        Hidden bias vector of the RBM.
    k : scalar or Theano scalar
        Length of the Gibbs chain.
    Return a (v_sample, cost, monitor, updates) tuple:
    v_sample : Theano vector or matrix with the same shape as `v`
        Corresponds to the generated sample(s).
    cost : Theano scalar
        Expression whose gradient with respect to W, bv, bh is the CD-k
        approximation to the log-likelihood of `v` (training example) under the
        RBM. The cost is averaged in the batch case.
    monitor: Theano scalar
        Pseudo log-likelihood (also averaged in the batch case).
    updates: dictionary of Theano variable -> Theano variable
        The `updates` object returned by scan.'''
    def gibbs_step(v):
        # One Gibbs sweep: sample hiddens given visibles, then visibles
        # given hiddens. Returns (mean-field visible, sampled visible).
        mean_h = T.nnet.sigmoid(T.dot(v, W) + bh)
        h = rng.binomial(size=mean_h.shape, n=1, p=mean_h,
                         dtype=theano.config.floatX)
        mean_v = T.nnet.sigmoid(T.dot(h, W.T) + bv)
        v = rng.binomial(size=mean_v.shape, n=1, p=mean_v,
                         dtype=theano.config.floatX)
        return mean_v, v
    # Run k sampling steps; only the sampled visibles are kept in the chain.
    chain, updates = theano.scan(lambda v: gibbs_step(v)[1], outputs_info=[v],
                                 n_steps=k)
    v_sample = chain[-1]
    mean_v = gibbs_step(v_sample)[0]
    # Cross-entropy between the data and the model's mean reconstruction,
    # used as a cheap training monitor (pseudo log-likelihood).
    monitor = T.xlogx.xlogy0(v, mean_v) + T.xlogx.xlogy0(1 - v, 1 - mean_v)
    monitor = monitor.sum() / v.shape[0]
    def free_energy(v):
        # F(v) = -bv.v - sum(softplus(v.W + bh)) written with explicit log/exp.
        return -(v * bv).sum() - T.log(1 + T.exp(T.dot(v, W) + bh)).sum()
    # CD-k cost: free-energy gap between data and the end-of-chain sample.
    cost = (free_energy(v) - free_energy(v_sample)) / v.shape[0]
    return v_sample, cost, monitor, updates
def shared_normal(num_rows, num_cols, scale=1):
    '''Initialize a matrix shared variable with normally distributed
    elements.'''
    values = numpy.random.normal(scale=scale, size=(num_rows, num_cols))
    return theano.shared(values.astype(theano.config.floatX))
def shared_zeros(*shape):
    '''Initialize a vector shared variable with zero elements.'''
    zeros = numpy.zeros(shape, dtype=theano.config.floatX)
    return theano.shared(zeros)
def build_rnnrbm(n_visible, n_hidden, n_hidden_recurrent):
    '''Construct a symbolic RNN-RBM and initialize parameters.
    n_visible : integer
        Number of visible units.
    n_hidden : integer
        Number of hidden units of the conditional RBMs.
    n_hidden_recurrent : integer
        Number of hidden units of the RNN.
    Return a (v, v_sample, cost, monitor, params, updates_train, v_t,
    updates_generate) tuple:
    v : Theano matrix
        Symbolic variable holding an input sequence (used during training)
    v_sample : Theano matrix
        Symbolic variable holding the negative particles for CD log-likelihood
        gradient estimation (used during training)
    cost : Theano scalar
        Expression whose gradient (considering v_sample constant) corresponds
        to the LL gradient of the RNN-RBM (used during training)
    monitor : Theano scalar
        Frame-level pseudo-likelihood (useful for monitoring during training)
    params : tuple of Theano shared variables
        The parameters of the model to be optimized during training.
    updates_train : dictionary of Theano variable -> Theano variable
        Update object that should be passed to theano.function when compiling
        the training function.
    v_t : Theano matrix
        Symbolic variable holding a generated sequence (used during sampling)
    updates_generate : dictionary of Theano variable -> Theano variable
        Update object that should be passed to theano.function when compiling
        the generation function.'''
    # RBM parameters plus the RNN->RBM and RNN->RNN coupling matrices.
    W = shared_normal(n_visible, n_hidden, 0.01)
    bv = shared_zeros(n_visible)
    bh = shared_zeros(n_hidden)
    Wuh = shared_normal(n_hidden_recurrent, n_hidden, 0.0001)
    Wuv = shared_normal(n_hidden_recurrent, n_visible, 0.0001)
    Wvu = shared_normal(n_visible, n_hidden_recurrent, 0.0001)
    Wuu = shared_normal(n_hidden_recurrent, n_hidden_recurrent, 0.0001)
    bu = shared_zeros(n_hidden_recurrent)
    params = W, bv, bh, Wuh, Wuv, Wvu, Wuu, bu  # learned parameters as shared
                                                # variables
    v = T.matrix()  # a training sequence
    u0 = T.zeros((n_hidden_recurrent,))  # initial value for the RNN hidden
                                         # units
    # If `v_t` is given, deterministic recurrence to compute the variable
    # biases bv_t, bh_t at each time step. If `v_t` is None, same recurrence
    # but with a separate Gibbs chain at each time step to sample (generate)
    # from the RNN-RBM. The resulting sample v_t is returned in order to be
    # passed down to the sequence history.
    def recurrence(v_t, u_tm1):
        # Time-dependent biases conditioned on the previous RNN state.
        bv_t = bv + T.dot(u_tm1, Wuv)
        bh_t = bh + T.dot(u_tm1, Wuh)
        generate = v_t is None
        if generate:
            # Sampling mode: draw v_t from a 25-step Gibbs chain.
            v_t, _, _, updates = build_rbm(T.zeros((n_visible,)), W, bv_t,
                                           bh_t, k=25)
        u_t = T.tanh(bu + T.dot(v_t, Wvu) + T.dot(u_tm1, Wuu))
        return ([v_t, u_t], updates) if generate else [u_t, bv_t, bh_t]
    # For training, the deterministic recurrence is used to compute all the
    # {bv_t, bh_t, 1 <= t <= T} given v. Conditional RBMs can then be trained
    # in batches using those parameters.
    (u_t, bv_t, bh_t), updates_train = theano.scan(
        lambda v_t, u_tm1, *_: recurrence(v_t, u_tm1),
        sequences=v, outputs_info=[u0, None, None], non_sequences=params)
    v_sample, cost, monitor, updates_rbm = build_rbm(v, W, bv_t[:], bh_t[:],
                                                     k=15)
    updates_train.update(updates_rbm)
    # symbolic loop for sequence generation
    (v_t, u_t), updates_generate = theano.scan(
        lambda u_tm1, *_: recurrence(None, u_tm1),
        outputs_info=[None, u0], non_sequences=params, n_steps=200)
    return (v, v_sample, cost, monitor, params, updates_train, v_t,
            updates_generate)
class RnnRbm:
    '''Simple class to train an RNN-RBM from MIDI files and to generate sample
    sequences.'''
    def __init__(
        self,
        n_hidden=150,
        n_hidden_recurrent=100,
        lr=0.001,
        r=(21, 109),
        dt=0.3
    ):
        '''Constructs and compiles Theano functions for training and sequence
        generation.
        n_hidden : integer
            Number of hidden units of the conditional RBMs.
        n_hidden_recurrent : integer
            Number of hidden units of the RNN.
        lr : float
            Learning rate
        r : (integer, integer) tuple
            Specifies the pitch range of the piano-roll in MIDI note numbers,
            including r[0] but not r[1], such that r[1]-r[0] is the number of
            visible units of the RBM at a given time step. The default (21,
            109) corresponds to the full range of piano (88 notes).
        dt : float
            Sampling period when converting the MIDI files into piano-rolls, or
            equivalently the time difference between consecutive time steps.'''
        self.r = r
        self.dt = dt
        (v, v_sample, cost, monitor, params, updates_train, v_t,
         updates_generate) = build_rnnrbm(
            r[1] - r[0],
            n_hidden,
            n_hidden_recurrent
        )
        # SGD step; v_sample is held constant so the gradient is the CD-k
        # approximation rather than backpropagating through the Gibbs chain.
        gradient = T.grad(cost, params, consider_constant=[v_sample])
        updates_train.update(
            ((p, p - lr * g) for p, g in zip(params, gradient))
        )
        self.train_function = theano.function(
            [v],
            monitor,
            updates=updates_train
        )
        self.generate_function = theano.function(
            [],
            v_t,
            updates=updates_generate
        )
    def train(self, files, batch_size=100, num_epochs=200):
        '''Train the RNN-RBM via stochastic gradient descent (SGD) using MIDI
        files converted to piano-rolls.
        files : list of strings
            List of MIDI files that will be loaded as piano-rolls for training.
        batch_size : integer
            Training sequences will be split into subsequences of at most this
            size before applying the SGD updates.
        num_epochs : integer
            Number of epochs (pass over the training set) performed. The user
            can safely interrupt training with Ctrl+C at any time.'''
        assert len(files) > 0, 'Training set is empty!' \
                               ' (did you download the data files?)'
        dataset = [midiread(f, self.r,
                            self.dt).piano_roll.astype(theano.config.floatX)
                   for f in files]
        try:
            for epoch in xrange(num_epochs):
                numpy.random.shuffle(dataset)
                costs = []
                for s, sequence in enumerate(dataset):
                    # Split each song into subsequences of batch_size frames.
                    for i in xrange(0, len(sequence), batch_size):
                        cost = self.train_function(sequence[i:i + batch_size])
                        costs.append(cost)
                print 'Epoch %i/%i' % (epoch + 1, num_epochs),
                print numpy.mean(costs)
                sys.stdout.flush()
        except KeyboardInterrupt:
            # Ctrl+C ends training early but keeps the parameters learned so far.
            print 'Interrupted by user.'
    def generate(self, filename, show=True):
        '''Generate a sample sequence, plot the resulting piano-roll and save
        it as a MIDI file.
        filename : string
            A MIDI file will be created at this location.
        show : boolean
            If True, a piano-roll of the generated sequence will be shown.'''
        piano_roll = self.generate_function()
        midiwrite(filename, piano_roll, self.r, self.dt)
        if show:
            extent = (0, self.dt * len(piano_roll)) + self.r
            pylab.figure()
            pylab.imshow(piano_roll.T, origin='lower', aspect='auto',
                         interpolation='nearest', cmap=pylab.cm.gray_r,
                         extent=extent)
            pylab.xlabel('time (s)')
            pylab.ylabel('MIDI note number')
            pylab.title('generated piano-roll')
def test_rnnrbm(batch_size=100, num_epochs=200):
    """Train an RnnRbm on the Nottingham training MIDI files and return it."""
    model = RnnRbm()
    # Glob pattern for the bundled dataset, relative to this file's parent.
    pattern = os.path.join(os.path.split(os.path.dirname(__file__))[0],
                           'data', 'Nottingham', 'train', '*.mid')
    model.train(glob.glob(pattern),
                batch_size=batch_size, num_epochs=num_epochs)
    return model
if __name__ == '__main__':
    # Demo: train on the Nottingham dataset, then write two generated
    # samples to MIDI files and display their piano-rolls.
    model = test_rnnrbm()
    model.generate('sample1.mid')
    model.generate('sample2.mid')
    pylab.show()
|
|
import new
import operator
import random
import struct
import sys

import mosek.array
from mosek import array
#import numpy
class AbstractMethodError(Exception): pass  # raised when an @abstractmethod stub is invoked
def is_basestring(s):
    # True when s is a Python 2 str or unicode instance.
    return isinstance(s,basestring)
class PrintTrace:
    """Context manager that prints the traceback and value of any exception
    leaving the block. It does not suppress the exception (Python 2 only:
    uses the `print` statement)."""
    def __enter__(self):
        return self
    def __exit__(self,exc_type,exc_value,tb):
        if (exc_type and exc_value and tb):
            import traceback
            traceback.print_tb(tb)
            print exc_value
def printtrace(f):
    """Decorator: run f inside a PrintTrace context so any exception's
    traceback is printed before propagating."""
    def wrapped(*args):
        with PrintTrace() as _:
            return f(*args)
    return wrapped
# Width-specific integer aliases (Python 2 only: sys.maxint / long).
# On a 64-bit build a plain int already holds 64 bits; on 32-bit builds
# fall back to `long` for the 64-bit alias.
if sys.maxint > 0x7fffffff:
    int64 = int
    int32 = int
else:
    int64 = long
    int32 = int
class Tools:
    # Static utility namespace for generated (Java-derived) fusion code.
    # The mangled method names encode the original Java signatures.
    @staticmethod
    def __arraycopy(src,srcoffset,tgt,tgtoffset,size):
        # Bounds-checked slice copy, mirroring java.lang.System.arraycopy.
        if len(tgt) < tgtoffset+size or len(src) < srcoffset+size:
            raise IndexError("Copy slice out of range")
        tgt[tgtoffset:tgtoffset+size] = src[srcoffset:srcoffset+size]
    _arraycopy__3II_3III = __arraycopy
    _arraycopy__3LI_3LII = __arraycopy
    _arraycopy__3_3II_3_3III = __arraycopy
    _arraycopy__3_3FI_3_3FII = __arraycopy
    _arraycopy__3FI_3FII = __arraycopy
    _arraycopy__3IL_3ILL = __arraycopy
    _arraycopy__3LL_3LLL = __arraycopy
    _arraycopy__3FL_3FLL = __arraycopy
    @staticmethod
    #def __arrayclone(a): return array.array(a)
    def __arrayclone(a): return a[:]
    # NOTE(review): the single-array "_arraycopy" aliases below actually
    # perform a clone, not a copy-into -- presumably intended by the generator.
    _arraycopy__3F = __arrayclone
    _arraycopy__3I = __arrayclone
    _arraycopy__3L = __arrayclone
    _arraycopy__3S = __arrayclone
    @staticmethod
    def __arraylength(a): return len(a)
    # NOTE(review): 'Constranit' below is a typo carried over from the
    # generator; callers must use the same misspelled attribute.
    _arraylength__3Omosek_fusion_Variable_2 = __arraylength
    _arraylength__3Omosek_fusion_Constranit_2 = __arraylength
    _arraylength__3Omosek_fusion_Matrix_2 = __arraylength
    _arraylength__3S = __arraylength
    _arraylength__3I = __arraylength
    _arraylength__3L = __arraylength
    _arraylength__3F = __arraylength
    _arraylength__3_3F = __arraylength
    _arraylength__3_3I = __arraylength
    _uarraylength__3I = __arraylength
    _uarraylength__3L = __arraylength
    _uarraylength__3F = __arraylength
    _uarraylength__3Omosek_fusion_Variable_2 = __arraylength
    _uarraylength__3Omosek_fusion_Constraint_2 = __arraylength
    _uarraylength__3Omosek_fusion_Matrix_2 = __arraylength
    _uarraylength__3_3S = __arraylength
    _uarraylength__3_3F = __arraylength
    _uarraylength__3_3I = __arraylength
    @staticmethod
    def __matrixheight(m): return len(m)
    @staticmethod
    def __matrixwidth(m): return 0 if len(m) == 0 else len(m[0])
    _matrixheight__3_3F = __matrixheight
    _matrixwidth__3_3F = __matrixwidth
    _matrixheight__3_3Omosek_fusion_Matrix_2 = __matrixheight
    _matrixwidth__3_3Omosek_fusion_Matrix_2 = __matrixwidth
    @staticmethod
    def _range_L (last): return array.arange(last)
    @staticmethod
    def _range_LL(first,last): return array.arange(first,last)
    @staticmethod
    def _range_LLL(first,last,step): return array.arange(first,last,step)
    @staticmethod
    def _range_I (last): return array.arange(last)
    @staticmethod
    def _range_II(first,last): return array.arange(first,last)
    @staticmethod
    def _range_III(first,last,step): return array.arange(first,last,step)
    # NOTE(review): the three definitions below silently REDEFINE the
    # _range_L/_range_LL/_range_LLL methods above, replacing mosek
    # array.arange results with plain Python ranges -- confirm which
    # variant is intended before relying on the return type.
    @staticmethod
    def _range_L (last): return range(last)
    @staticmethod
    def _range_LL(first,last): return range(first,last)
    @staticmethod
    def _range_LLL(first,last,step): return range(first,last,step)
    @staticmethod
    def range(*args): return array.arange(*args)
    @staticmethod
    def _zeros_I (num): return array.zeros(num,float)
    @staticmethod
    def _zeros_II (dimi,dimj): return array.zeros((dimi,dimj),float)
    #def zeros (dimi,dimj=None):
    #    if dimj is None: return _zeros_I(dimi)
    #    else:            _zeros_II(dimi,dimj)
    @staticmethod
    def _ones_I(num): return array.ones(num,float)
    @staticmethod
    def _asint_U(i): return i
    @staticmethod
    def _asuint_I(i): return i
    @staticmethod
    def _asint_J(i): return i
    @staticmethod
    def _asuint_L(i): return i
    @staticmethod
    def _as64bit_U(i): return long(i)
    @staticmethod
    def _as64bit_I(i): return long(i)
    @staticmethod
    def _as32bit_J(i): return int(i)
    @staticmethod
    def _as32bit_L(i): return int(i)
    @staticmethod
    def _sort__3III(val,first,last):
        # In-place sort of the slice val[first:last].
        tmp = list(val[first:last])
        tmp.sort()
        val[first:last] = tmp
    _sort__3LII = _sort__3III
    __rand = random.Random()  # shared PRNG backing _randInt_I
    @staticmethod
    def _randInt_I(max):
        #return random.randint(0,max-1)
        return Tools.__rand.randint(0,max-1)
    @staticmethod
    def _argsort__3L_3I_3LII(perm,val1,val2,first,last):
        # Sort perm[first:last] lexicographically by (val1, val2)
        # (Python 2 cmp-style comparator).
        p = list(perm[first:last])
        p.sort(lambda lhs,rhs: cmp(val1[lhs],val1[rhs]) or cmp(val2[lhs],val2[rhs]))
        perm[first:last] = p
    @staticmethod
    def _argsort__3L_3I_3I(perm,val1,val2):
        # NOTE(review): Tools._argsort__3I_3I_3III is not defined anywhere
        # in this class; calling this raises AttributeError. It most likely
        # should delegate to _argsort__3L_3I_3LII -- confirm before fixing.
        Tools._argsort__3I_3I_3III(perm,val1,val2,0,len(perm))
    @staticmethod
    def _argsort__3L_3I(perm,vals):
        # NOTE(review): Tools._argsort__3I_3III is also undefined; this
        # likely should delegate to _argsort__3L_3III.
        Tools._argsort__3I_3III(perm,vals,0,len(perm))
    @staticmethod
    def _argsort__3L_3III(perm,val,first,last):
        p = list(perm[first:last])
        p.sort(lambda lhs,rhs: cmp(val[lhs],val[rhs]))
        perm[first:last] = p
    @staticmethod
    def _sort__3I_II (vals,first,last):
        tmp = vals[first:last]
        tmp.sort()
        vals[first:last] = tmp
    @staticmethod
    def _makevector_FI(val,num): return array.doublearray([val] * num)
    @staticmethod
    def _makevector_II(val,num): return array.int32array([val] * num)
    @staticmethod
    def _makevector_LI(val,num): return array.int64array([long(val)] * num)
    @staticmethod
    def _repeatrange_III(first,last,num):
        # Concatenate `num` copies of range(first,last) into one int32 array.
        res = array.zeros((last-first)*num,array.int32)
        ra = array.arange(first,last)
        l = last-first
        for i in range(num):
            res[i*l:(i+1)*l] = ra
        return res
    @staticmethod
    def _repeatrange_LLL(first,last,num):
        res = array.zeros((last-first)*num,array.int64)
        ra = array.arange(first,last)
        l = last-first
        for i in range(num):
            res[i*l:(i+1)*l] = ra
        return res
    # public static method vectoradd :: [double] (var lhs :: [double],var rhs :: [double]);
    @staticmethod
    def _vectoradd__3F_3F (v1,v2):
        # Element-wise sum; assumes both vectors have equal length.
        return array.array([ v1[i]+v2[i] for i in xrange(len(v1)) ])
    @staticmethod
    def _vectorsub__3F_3F (v1,v2):
        return array.array([ v1[i]-v2[i] for i in xrange(len(v1)) ])
        #return v1-v2
    # public static method vectorsub :: [double] (var lhs :: [double],var rhs :: [double]);
    @staticmethod
    def _vectorneg__3F(v):
        return array.array([ -i for i in v ])
        #return -v
    # public static method dot :: double (var lhs :: [double],var rhs :: [double]);
    # public static method sum :: double (var rhs :: [double]);
    @staticmethod
    def _stringvalue_I(v): return str(v)
    @staticmethod
    def _stringvalue_L(v): return str(v)
    @staticmethod
    def _toDouble_S(v):
        return float(v)
    @staticmethod
    def _toInt_S(v):
        return int(v)
#
#
#
# public static method joinvals :: string (var vals :: [int32]);
#
#class BaseException(Exception):
# def _toString_ (self): return str(self)
#
#class BaseError(Exception):
# def _toString_ (self): return str(self)
#
#class UnexpectedError(Exception):
# def _toString_ (self): return str(self)
class StringBuffer:
    """Mutable string accumulator with a fluent, StringBuffer-like API.

    The `_a_*` aliases append scalar values; the `_a__3*` variants append
    array renderings ('None' for a null array). All methods return self
    so calls can be chained.

    Fix: the original defined `_toString_` twice with identical bodies;
    the duplicate has been removed.
    """
    def __init__(self):
        self.__buf = []
    def __a(self,v):
        # Append str(v); chainable.
        self.__buf.append(str(v))
        return self
    def __a_array(self,v):
        # Append str(v), or the literal "None" for a null array.
        if v is None: self.__buf.append("None")
        else: self.__buf.append(str(v))
        return self
    _a_I = __a
    _a_L = __a
    _a_U = __a
    _a_F = __a
    _a_S = __a
    _a_B = __a
    _a__3I = __a_array
    def _a__3L(self,v):
        # Integer array rendered as "[i1,i2,...]".
        if v is not None: self.__buf.extend([ '[',','.join(['%i' % i for i in v]),']'])
        else: self.__buf.append('None')
        return self
    def _a__3F(self,v):
        # Float array rendered as "[g1,g2,...]" using %g formatting.
        if v is not None: self.__buf.extend([ '[',','.join(['%g' % i for i in v]),']'])
        else: self.__buf.append('None')
        return self
    _a__3S = __a_array
    def _lf_(self):
        # Append a newline; chainable.
        self.__buf.append('\n')
        return self
    def _clear_ (self):
        # Drop all accumulated text; chainable.
        self.__buf = []
        return self
    def _toString_(self):
        # Return the accumulated string.
        return ''.join(self.__buf)
    def _consolePrint_(self):
        # Print the accumulated text to stdout and reset the buffer.
        print(self._toString_())
        self._clear_()
        return(self)
class IntMap:
    """Minimal integer-keyed map with the generated-code calling convention.

    Fix: `dict.has_key` (removed in Python 3) replaced with the `in`
    operator, which behaves identically on Python 2 as well.
    """
    def __init__ (self):
        self.__d = {}
    def _hasItem_L(self,key):
        # Membership test; `in` works on both Python 2 and 3.
        return key in self.__d
    def _getItem_L(self,key): return self.__d[key]
    def _setItem_LI(self,key,val): self.__d[key] = val
class OutputFileStream:
    """Text-mode output stream with a fluent `_write_*` interface.

    An IOError on open is re-raised as mosek.fusion.IOError (Python 2
    except syntax). All write methods return self for chaining.
    """
    def __init__(self,filename):
        try:
            self.__outfile = open(filename,'wt')
        except IOError,e:
            raise mosek.fusion.IOError(str(e))
    def _write_S(self,v):
        self.__outfile.write(str(v))
        return self
    def _write_I(self,v):
        self.__outfile.write(str(v))
        return self
    def _write_L(self,v):
        self.__outfile.write(str(v))
        return self
    def _write_F(self,v):
        self.__outfile.write(str(v))
        return self
    def _lf_(self):
        # Newline also flushes so line-oriented consumers see output promptly.
        self.__outfile.write('\n')
        self.__outfile.flush()
        return self
    def _close_(self): self.__outfile.close()
class BinaryOutputStream:
    """Little-endian binary writer used by generated serialization code.

    All `_write_*` methods return self so calls can be chained.

    Fixes relative to the original:
    - open mode was the invalid 'bt'; binary writing requires 'wb'.
    - the array writers passed the whole list to struct.pack instead of
      unpacking its elements (*v), which raises struct.error.
    - `_write__3B` built a str via chr(); bytearray works for binary
      files on both Python 2 and 3.
    - `except IOError as e` (valid on Python 2.6+ and Python 3).
    """
    def __init__(self,filename):
        try:
            self.__outfile = open(filename,'wb')
        except IOError as e:
            raise mosek.fusion.IOError(str(e))
    def _write_S(self,v):
        # Length-prefixed UTF-8 string: <int32 byte count><bytes>.
        s = v.encode('utf-8')
        self.__outfile.write(struct.pack('<i',len(s)))
        self.__outfile.write(s)
        return self
    def _write_I(self,v):
        self.__outfile.write(struct.pack('<i',v))
        return self
    def _write_L(self,v):
        self.__outfile.write(struct.pack('<q',v))
        return self
    def _write_F(self,v):
        self.__outfile.write(struct.pack('<d',v))
        return self
    def _write_B(self,v):
        self.__outfile.write(struct.pack('<b',v))
        return self
    def _write__3I(self,v):
        # struct.pack needs the elements unpacked, not the list itself.
        self.__outfile.write(struct.pack('<%di' % len(v),*v))
        return self
    def _write__3L(self,v):
        self.__outfile.write(struct.pack('<%dq' % len(v),*v))
        return self
    def _write__3F(self,v):
        self.__outfile.write(struct.pack('<%dd' % len(v),*v))
        return self
    def _write__3B(self,v):
        self.__outfile.write(bytearray(v))
        return self
    def _close_(self): self.__outfile.close()
# Factory helpers for typed arrays (mosek array module) and type predicates
# (Python 2 only: `long`, operator.isSequenceType).
def new_object_array (size): return [ None ] * size
def new_basestr_array (size): return [None] * size
def new_bool_array (size): return array.zeros(size,bool)
def new_int32_array (size): return array.zeros(size,array.int32)
def new_int64_array (size): return array.zeros(size,array.int64)
def new_double_array (size): return array.zeros(size,array.float64)
def isArray (v):
    # None counts as a (null) array; otherwise any sequence-like object.
    return v is None or operator.isSequenceType(v)
    #return v is None or isinstance(v,numpy.ndarray) or isinstance(v,mosek.array.ndarray) or isinstance(v,list)
def is_int(v):
    # NOTE(review): identical to is_long below; both accept int and long.
    return isinstance(v,int) or isinstance(v,long)
def is_long(v):
    return isinstance(v,int) or isinstance(v,long)
def is_float(v):
    return isinstance(v,float)
# Argument checking methods
def methodargs(*convs):
    """Decorator factory for argument conversion/checking.

    Each positional argument of the decorated function is passed through
    the converter at the same position in `convs` before the call.

    Raises ValueError when the number of arguments does not match the
    number of converters.

    Fixes: the error message referenced an undefined name `conv`
    (NameError instead of the intended ValueError); the surrounding
    try/except that only re-raised has been removed.
    """
    def _transform(func):
        def _conv(*args):
            if len(args) != len(convs):
                raise ValueError("Expected %d arguments, got %d" % (len(convs),len(args)))
            return func(*[ c(a) for (a,c) in zip(args,convs) ])
        return _conv
    return _transform
def arg_id(a):
    """Identity converter: pass the argument through unchanged."""
    return a
def arg_array(ndim,t):
    """Return a converter that validates an `ndim`-deep nested sequence and
    converts it via the mosek array module. `t` is the element type, or
    arg_id for untyped conversion. None passes through unchanged."""
    def _conv(a):
        if a is None: return None
        # test depth:
        # Walk down the first element of each nesting level; strings stop
        # the descent (Python 2 `basestring`), as does any non-indexable.
        v = a
        d = 0
        try:
            while not isinstance(v,basestring):
                v = v[0]
                d += 1
        except:
            pass
        if d != ndim:
            raise ValueError("Expected array of %d dimensions, got %d" % (ndim,d))
        if t is arg_id:
            return array.array(a)
        else:
            return array.array(a,t)
    return _conv
def abstractmethod(f):
    """Decorator marking `f` abstract: invoking the decorated function
    raises AbstractMethodError naming the original method.

    Fix: the original never returned the wrapper, so every decorated
    method was silently replaced by None.
    """
    def _transform(*args):
        raise AbstractMethodError("Cannot invoke abstract method %s" % (f.__name__))
    return _transform
# Scalar argument converters used with methodargs() (`unicode` is Python 2;
# the int/float types come from the mosek array module).
arg_bool = bool
arg_int32 = array.int32
arg_int64 = array.int64
arg_double = array.float64
arg_string = unicode
class FusionException(Exception):
    """Base class for fusion errors; renders as 'ClassName: message'."""
    def _toString_(self):
        # Both spellings of the stringifier share one implementation.
        return '%s: %s' % (type(self).__name__, str(self))
    toString = _toString_
class Debug:
    """Fluent stdout tracer used by generated code; every method returns
    self so calls can be chained."""
    @staticmethod
    def _o_():
        # Factory used by generated code in place of the constructor.
        return Debug()
    def p(self,v):
        # Write str(v) without a trailing newline.
        sys.stdout.write(str(v))
        return self
    _p_S = p
    _p_I = p
    _p_L = p
    _p_F = p
    _p_B = p
    def p_array (self,v):
        # Bracketed comma-separated rendering, or 'None' for a null array.
        if v is None:
            text = 'None'
        else:
            text = '[ %s ]' % ','.join(str(item) for item in v)
        sys.stdout.write(text)
        return self
    _p__3I = p_array
    _p__3L = p_array
    _p__3F = p_array
    def _lf_(self):
        # Emit a newline.
        sys.stdout.write("\n")
        return self
def newobject(cls,init,*args):
    """Allocate an instance of old-style class `cls` without running its
    __init__ (Python 2 `new` module), then apply the given initializer."""
    tmp = new.instance(cls)
    init(tmp,*args)
    return tmp
|
|
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.18 (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
    """Get the keywords needed to look up the version information."""
    # These literals are substituted by git during 'git archive'.
    # setup.py/versioneer.py greps for the *variable names*, so each one
    # must stay on its own line with its name unchanged.
    git_refnames = "$Format:%d$"
    git_full = "$Format:%H$"
    git_date = "$Format:%ci$"
    return {
        "refnames": git_refnames,
        "full": git_full,
        "date": git_date,
    }
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Attributes (VCS, style, tag_prefix, parentdir_prefix,
    # versionfile_source, verbose) are assigned by get_config().
def get_config():
    """Create, populate and return the VersioneerConfig() object."""
    # These values were filled in when 'setup.py versioneer' generated
    # this _version.py.
    config = VersioneerConfig()
    config.VCS = "git"
    config.style = "pep440"
    config.tag_prefix = ""
    config.parentdir_prefix = "q2cli-"
    config.versionfile_source = "q2cli/_version.py"
    config.verbose = False
    return config
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}  # populated by versioneer itself; unused inside _version.py
HANDLERS = {}  # maps VCS name -> {method name -> handler function}
def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and hand it back unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
                env=None):
    """Try each candidate command until one launches; run it with `args`.

    Returns (stdout, returncode). Yields (None, None) when no candidate
    could be started at all, and (None, returncode) when the process
    exited non-zero.
    """
    assert isinstance(commands, list)
    process = None
    for candidate in commands:
        dispcmd = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            process = subprocess.Popen([candidate] + args, cwd=cwd, env=env,
                                       stdout=subprocess.PIPE,
                                       stderr=(subprocess.PIPE if hide_stderr
                                               else None))
            break
        except EnvironmentError:
            # sys.exc_info keeps this compatible with both Python 2 and 3.
            err = sys.exc_info()[1]
            if err.errno == errno.ENOENT:
                # Command not found: try the next candidate.
                continue
            if verbose:
                print("unable to run %s" % dispcmd)
                print(err)
            return None, None
    else:
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None, None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % dispcmd)
            print("stdout was %s" % stdout)
        return None, process.returncode
    return stdout, process.returncode
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Derive the version from the name of a parent directory.

    Source tarballs conventionally unpack into PROJECT-VERSION
    directories; walk up to two extra levels looking for a directory
    name starting with parentdir_prefix. Raises NotThisMethod when
    nothing matches.
    """
    tried = []
    for _ in range(3):
        basename = os.path.basename(root)
        if basename.startswith(parentdir_prefix):
            return {"version": basename[len(parentdir_prefix):],
                    "full-revisionid": None,
                    "dirty": False, "error": None, "date": None}
        tried.append(root)
        root = os.path.dirname(root)  # up a level
    if verbose:
        print("Tried directories %s but none started with prefix %s" %
              (str(tried), parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file."""
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        f = open(versionfile_abs, "r")
        for line in f.readlines():
            # Each keyword assignment sits on its own line; grab the quoted
            # value with a regexp rather than importing the module.
            if line.strip().startswith("git_refnames ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["refnames"] = mo.group(1)
            if line.strip().startswith("git_full ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["full"] = mo.group(1)
            if line.strip().startswith("git_date ="):
                mo = re.search(r'=\s*"(.*)"', line)
                if mo:
                    keywords["date"] = mo.group(1)
        f.close()
    except EnvironmentError:
        # Missing/unreadable file: fall through and return what we have
        # (possibly an empty dict).
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords."""
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    date = keywords.get("date")
    if date is not None:
        # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
        # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
        # -like" string, which we must then edit to make compliant), because
        # it's been around since git-1.5.3, and it's too difficult to
        # discover which version we're using, or to work around using an
        # older one.
        date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # Keyword was never substituted: this is not a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = set([r.strip() for r in refnames.strip("()").split(",")])
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(refs - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None,
                    "date": date}
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags", "date": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.
    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
                          hide_stderr=True)
    if rc != 0:
        if verbose:
            print("Directory %s not under git control" % root)
        raise NotThisMethod("'git rev-parse --git-dir' returned error")
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM)
    describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
                                          "--always", "--long",
                                          "--match", "%s*" % tag_prefix],
                                   cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                    cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    # commit date: see ISO-8601 comment in git_versions_from_keywords()
    # NOTE(review): if this git invocation fails, run_command returns
    # (None, rc) and the .strip() below raises AttributeError -- confirm
    # whether that failure mode is acceptable here.
    date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
                       cwd=root)[0].strip()
    pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
    return pieces
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    PEP 440 local version segments start with '+' and separate further
    parts with '.'.

    Fix: `pieces.get("closest-tag", "")` does not guard against an
    explicit None value (the no-tag case), which made `"+" in None`
    raise TypeError; normalize before the containment test.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tags at all
        version = "0+untagged.%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
        return version
    version = tag
    if pieces["distance"] or pieces["dirty"]:
        version += plus_or_dot(pieces)
        version += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            version += ".dirty"
    return version
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    if pieces["distance"]:
        return tag + ".post.dev%d" % pieces["distance"]
    return tag
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
            version += plus_or_dot(pieces)
            version += "g%s" % pieces["short"]
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    version += "+g%s" % pieces["short"]
    return version
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.
    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if tag:
        version = tag
        if pieces["distance"] or pieces["dirty"]:
            version += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                version += ".dev0"
        return version
    # exception #1
    version = "0.post%d" % pieces["distance"]
    if pieces["dirty"]:
        version += ".dev0"
    return version
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].
    Like 'git describe --tags --dirty --always'.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered = "%s-%d-g%s" % (tag, pieces["distance"],
                                      pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].
    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.
    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # Distance and hash are always appended in the long form.
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1: fall back to the bare short hash
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Extraction failed earlier; report the error instead of a version.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"],
                "date": None}
    if not style or style == "default":
        style = "pep440"  # the default
    renderers = {
        "pep440": render_pep440,
        "pep440-pre": render_pep440_pre,
        "pep440-post": render_pep440_post,
        "pep440-old": render_pep440_old,
        "git-describe": render_git_describe,
        "git-describe-long": render_git_describe_long,
    }
    if style not in renderers:
        raise ValueError("unknown style '%s'" % style)
    return {"version": renderers[style](pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"], "error": None,
            "date": pieces.get("date")}
def get_versions():
    """Get version information or return default if unable to do so."""
    # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
    # __file__, we can work backwards from there to the root. Some
    # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
    # case we can only use expanded keywords.
    cfg = get_config()
    verbose = cfg.verbose
    # Strategy 1: expanded git keywords (works even without a .git dir).
    try:
        return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
                                          verbose)
    except NotThisMethod:
        pass
    try:
        root = os.path.realpath(__file__)
        # versionfile_source is the relative path from the top of the source
        # tree (where the .git directory might live) to this file. Invert
        # this to find the root from __file__.
        for _ in cfg.versionfile_source.split('/'):
            root = os.path.dirname(root)
    except NameError:
        return {"version": "0+unknown", "full-revisionid": None,
                "dirty": None,
                "error": "unable to find root of source tree",
                "date": None}
    # Strategy 2: ask git directly.
    try:
        return render(git_pieces_from_vcs(cfg.tag_prefix, root, verbose),
                      cfg.style)
    except NotThisMethod:
        pass
    # Strategy 3: parse the version out of the parent directory name.
    try:
        if cfg.parentdir_prefix:
            return versions_from_parentdir(cfg.parentdir_prefix, root,
                                           verbose)
    except NotThisMethod:
        pass
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None,
            "error": "unable to compute version", "date": None}
|
|
import urllib
import json
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import user_passes_test
from django.core.urlresolvers import reverse
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.forms.util import ErrorList
from crits.core import form_consts
from crits.core.data_tools import json_handler
from crits.core.handsontable_tools import form_to_dict
from crits.core.user_tools import user_can_view_data
from crits.domains.forms import TLDUpdateForm, AddDomainForm
from crits.domains.handlers import edit_domain_name
from crits.domains.handlers import add_new_domain, get_domain_details
from crits.domains.handlers import update_tlds, generate_domain_jtable
from crits.domains.handlers import generate_domain_csv, process_bulk_add_domain
from crits.objects.forms import AddObjectForm
@user_passes_test(user_can_view_data)
def domain_detail(request, domain):
    """
    Generate the Domain details page.
    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param domain: The domain to get details for.
    :type domain: str
    :returns: :class:`django.http.HttpResponse`
    """
    (new_template, args) = get_domain_details(domain,
                                              request.user.username)
    # Fall back to the default template unless the handler picked one.
    template = new_template if new_template else "domain_detail.html"
    return render_to_response(template, args, RequestContext(request))
@user_passes_test(user_can_view_data)
def bulk_add_domain(request):
    """
    Bulk add domains via a bulk upload form.
    Args:
        request: The Django context which contains information about the
            session and key/value pairs for the bulk add domains request
    Returns:
        If the request is not a POST and not a Ajax call then:
            Returns a rendered HTML form for a bulk add of domains
        If the request is a POST and a Ajax call then:
            Returns a response that contains information about the
            status of the bulk uploaded domains. This may include information
            such as domains that failed or successfully added. This may
            also contain helpful status messages about each operation.
    """
    formdict = form_to_dict(AddDomainForm(request.user))
    # AJAX POST: process the upload and return a JSON status payload.
    if request.method == "POST" and request.is_ajax():
        response = process_bulk_add_domain(request, formdict)
        return HttpResponse(json.dumps(response,
                                       default=json_handler),
                            content_type="application/json")
    # Otherwise render the bulk-add form page.
    objectformdict = form_to_dict(AddObjectForm(request.user))
    return render_to_response('bulk_add_default.html',
                              {'formdict': formdict,
                               'objectformdict': objectformdict,
                               'title': "Bulk Add Domains",
                               'table_name': 'domain',
                               'local_validate_columns': [form_consts.Domain.DOMAIN_NAME],
                               'custom_js': "domain_handsontable.js",
                               'is_bulk_add_objects': True},
                              RequestContext(request))
@user_passes_test(user_can_view_data)
def domains_listing(request, option=None):
    """
    Generate the Domain listing page.
    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    # CSV export is handled separately; everything else goes to the jtable.
    return (generate_domain_csv(request) if option == "csv"
            else generate_domain_jtable(request, option))
@user_passes_test(user_can_view_data)
def add_domain(request):
    """
    Add a domain. Should be an AJAX POST.
    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponse`
    """
    # Guard clause: this endpoint only accepts AJAX POSTs.
    if not (request.is_ajax() and request.method == "POST"):
        return render_to_response("error.html",
                                  {"error" : 'Expected POST' },
                                  RequestContext(request))
    add_form = AddDomainForm(request.user, request.POST)
    result = False
    retVal = {}
    errors = []
    if add_form.is_valid():
        data = add_form.cleaned_data
        (result, errors, retVal) = add_new_domain(data,
                                                  request,
                                                  errors)
    if errors:
        # Make sure retVal['message'] is a string we can append HTML to.
        if 'message' not in retVal:
            retVal['message'] = ""
        elif not isinstance(retVal['message'], str):
            retVal['message'] = str(retVal['message'])
        for e in errors:
            # Attach field-level errors so the re-rendered form highlights
            # the offending inputs.
            if 'Domain' in e or 'TLD' in e:
                dom_form_error = add_form._errors.setdefault("domain",
                                                             ErrorList())
                dom_form_error.append('Invalid Domain')
            elif 'IP' in e:
                ip_form_error = add_form._errors.setdefault("ip",
                                                            ErrorList())
                ip_form_error.append('Invalid IP')
            retVal['message'] += '<div>' + str(e) + '</div>'
    if not result:
        # Return the (possibly annotated) form so the client can re-render.
        retVal['form'] = add_form.as_table()
    retVal['success'] = result
    return HttpResponse(json.dumps(retVal,
                                   default=json_handler),
                        content_type="application/json")
@user_passes_test(user_can_view_data)
def edit_domain(request, domain):
    """
    Edit a domain. Should be an AJAX POST.
    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param domain: The domain to edit.
    :type domain: str
    :returns: :class:`django.http.HttpResponse`
    """
    # Guard clause: only AJAX POSTs may edit.
    if not (request.method == "POST" and request.is_ajax()):
        return render_to_response("error.html",
                                  {"error" : 'Expected AJAX POST' },
                                  RequestContext(request))
    new_name = request.POST.get('value')
    analyst = request.user.username
    if edit_domain_name(domain, new_name, analyst):
        # Echo the new name back on success.
        return HttpResponse(new_name)
    # On failure, echo the unchanged name.
    return HttpResponse(domain)
@user_passes_test(user_can_view_data)
def domain_search(request):
    """
    Search for domains.
    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    # One-key query dict: {search_type: stripped search term}.
    query = {request.GET.get('search_type', ''):
             request.GET.get('q', '').strip()}
    listing_url = reverse('crits.domains.views.domains_listing')
    return HttpResponseRedirect(listing_url + "?%s" % urllib.urlencode(query))
@user_passes_test(user_can_view_data)
def tld_update(request):
    """
    Update TLDs. Should be an AJAX POST.
    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :returns: :class:`django.http.HttpResponseRedirect`
    """
    # Guard clause: only POSTs carry the upload.
    if request.method != 'POST':
        return render_to_response('error.html',
                                  {'error': 'Expected POST'},
                                  RequestContext(request))
    form = TLDUpdateForm(request.POST, request.FILES)
    if form.is_valid():
        result = update_tlds(request.FILES['filedata'])
        if result['success']:
            listing = reverse('crits.domains.views.domains_listing')
            response = {'success': True,
                        'message': 'Success! <a href="%s">Go to Domains.</a>'
                                   % listing}
        else:
            response = {'success': False, 'form': form.as_table()}
    else:
        response = {'success': False, 'form': form.as_table()}
    return render_to_response('file_upload_response.html',
                              {'response': json.dumps(response)},
                              RequestContext(request))
|
|
# encoding: utf-8
"""Gherkin step implementations for chart axis features."""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from behave import given, then, when
from pptx import Presentation
from pptx.enum.chart import XL_AXIS_CROSSES, XL_CATEGORY_TYPE
from helpers import test_pptx
# given ===================================================
@given('a {axis_type} axis')
def given_a_axis_type_axis(context, axis_type):
    # Open the fixture deck and expose the requested axis on the context.
    chart = Presentation(test_pptx('cht-axis-props')).slides[0].shapes[0].chart
    context.axis = {
        'category': chart.category_axis,
        'value': chart.value_axis,
    }[axis_type]
@given('a major gridlines')
def given_a_major_gridlines(context):
prs = Presentation(test_pptx('cht-gridlines-props'))
axis = prs.slides[0].shapes[0].chart.value_axis
context.gridlines = axis.major_gridlines
@given('a value axis having category axis crossing of {crossing}')
def given_a_value_axis_having_cat_ax_crossing_of(context, crossing):
slide_idx = {
'automatic': 0,
'maximum': 2,
'minimum': 3,
'2.75': 4,
'-1.5': 5,
}[crossing]
prs = Presentation(test_pptx('cht-axis-props'))
context.value_axis = prs.slides[slide_idx].shapes[0].chart.value_axis
@given('an axis')
def given_an_axis(context):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.value_axis
@given('an axis having {a_or_no} title')
def given_an_axis_having_a_or_no_title(context, a_or_no):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[7].shapes[0].chart
context.axis = {
'a': chart.value_axis,
'no': chart.category_axis,
}[a_or_no]
@given('an axis having {major_or_minor} gridlines')
def given_an_axis_having_major_or_minor_gridlines(context, major_or_minor):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.value_axis
@given('an axis having {major_or_minor} unit of {value}')
def given_an_axis_having_major_or_minor_unit_of_value(
context, major_or_minor, value):
slide_idx = 0 if value == 'Auto' else 1
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.axis = chart.value_axis
@given('an axis of type {cls_name}')
def given_an_axis_of_type_cls_name(context, cls_name):
slide_idx = {
'CategoryAxis': 0,
'DateAxis': 6,
}[cls_name]
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.axis = chart.category_axis
@given('an axis not having {major_or_minor} gridlines')
def given_an_axis_not_having_major_or_minor_gridlines(context, major_or_minor):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[0].shapes[0].chart
context.axis = chart.category_axis
@given('an axis title')
def given_an_axis_title(context):
prs = Presentation(test_pptx('cht-axis-props'))
context.axis_title = prs.slides[7].shapes[0].chart.value_axis.axis_title
@given('an axis title having {a_or_no} text frame')
def given_an_axis_title_having_a_or_no_text_frame(context, a_or_no):
prs = Presentation(test_pptx('cht-axis-props'))
chart = prs.slides[7].shapes[0].chart
axis = {
'a': chart.value_axis,
'no': chart.category_axis,
}[a_or_no]
context.axis_title = axis.axis_title
@given('tick labels having an offset of {setting}')
def given_tick_labels_having_an_offset_of_setting(context, setting):
slide_idx = {
'no explicit setting': 0,
'420': 1,
}[setting]
prs = Presentation(test_pptx('cht-ticklabels-props'))
chart = prs.slides[slide_idx].shapes[0].chart
context.tick_labels = chart.category_axis.tick_labels
# when ====================================================
@when('I assign {value} to axis.has_title')
def when_I_assign_value_to_axis_has_title(context, value):
context.axis.has_title = {'True': True, 'False': False}[value]
@when('I assign {value} to axis.has_{major_or_minor}_gridlines')
def when_I_assign_value_to_axis_has_major_or_minor_gridlines(
        context, value, major_or_minor):
    # Map the step's literal text to a bool and set the named property.
    bool_value = {'True': True, 'False': False}[value]
    setattr(context.axis, 'has_%s_gridlines' % major_or_minor, bool_value)
@when('I assign {value} to axis.{major_or_minor}_unit')
def when_I_assign_value_to_axis_major_or_minor_unit(
        context, value, major_or_minor):
    # Literal step text -> Python value (float, int, or None).
    unit = {'8.4': 8.4, '5': 5, 'None': None}[value]
    setattr(context.axis, '%s_unit' % major_or_minor, unit)
@when('I assign {value} to axis_title.has_text_frame')
def when_I_assign_value_to_axis_title_has_text_frame(context, value):
context.axis_title.has_text_frame = {'True': True, 'False': False}[value]
@when('I assign {value} to tick_labels.offset')
def when_I_assign_value_to_tick_labels_offset(context, value):
new_value = int(value)
context.tick_labels.offset = new_value
@when('I assign {member} to value_axis.crosses')
def when_I_assign_member_to_value_axis_crosses(context, member):
value_axis = context.value_axis
value_axis.crosses = getattr(XL_AXIS_CROSSES, member)
@when('I assign {value} to value_axis.crosses_at')
def when_I_assign_value_to_value_axis_crosses_at(context, value):
new_value = None if value == 'None' else float(value)
context.value_axis.crosses_at = new_value
# then ====================================================
@then('axis.axis_title is an AxisTitle object')
def then_axis_axis_title_is_an_AxisTitle_object(context):
class_name = type(context.axis.axis_title).__name__
assert class_name == 'AxisTitle', 'got %s' % class_name
@then('axis.category_type is XL_CATEGORY_TYPE.{member}')
def then_axis_category_type_is_XL_CATEGORY_TYPE_member(context, member):
expected_value = getattr(XL_CATEGORY_TYPE, member)
category_type = context.axis.category_type
assert category_type is expected_value, 'got %s' % category_type
@then('axis.format is a ChartFormat object')
def then_axis_format_is_a_ChartFormat_object(context):
axis = context.axis
assert type(axis.format).__name__ == 'ChartFormat'
@then('axis.format.fill is a FillFormat object')
def then_axis_format_fill_is_a_FillFormat_object(context):
axis = context.axis
assert type(axis.format.fill).__name__ == 'FillFormat'
@then('axis.format.line is a LineFormat object')
def then_axis_format_line_is_a_LineFormat_object(context):
axis = context.axis
assert type(axis.format.line).__name__ == 'LineFormat'
@then('axis.has_title is {value}')
def then_axis_has_title_is_value(context, value):
axis = context.axis
actual_value = axis.has_title
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis.has_{major_or_minor}_gridlines is {value}')
def then_axis_has_major_or_minor_gridlines_is_expected_value(
        context, major_or_minor, value):
    expected = {'True': True, 'False': False}[value]
    # Both properties are read; the step text selects which one to check.
    actual = {
        'major': context.axis.has_major_gridlines,
        'minor': context.axis.has_minor_gridlines,
    }[major_or_minor]
    assert actual is expected, 'got %s' % actual
@then('axis.major_gridlines is a MajorGridlines object')
def then_axis_major_gridlines_is_a_MajorGridlines_object(context):
axis = context.axis
assert type(axis.major_gridlines).__name__ == 'MajorGridlines'
@then('axis.{major_or_minor}_unit is {value}')
def then_axis_major_or_minor_unit_is_value(context, major_or_minor, value):
    expected = {
        '20.0': 20.0, '8.4': 8.4, '5.0': 5.0, '4.2': 4.2, 'None': None
    }[value]
    actual = getattr(context.axis, '%s_unit' % major_or_minor)
    assert actual == expected, 'got %s' % actual
@then('axis_title.format is a ChartFormat object')
def then_axis_title_format_is_a_ChartFormat_object(context):
class_name = type(context.axis_title.format).__name__
assert class_name == 'ChartFormat', 'got %s' % class_name
@then('axis_title.format.fill is a FillFormat object')
def then_axis_title_format_fill_is_a_FillFormat_object(context):
class_name = type(context.axis_title.format.fill).__name__
assert class_name == 'FillFormat', 'got %s' % class_name
@then('axis_title.format.line is a LineFormat object')
def then_axis_title_format_line_is_a_LineFormat_object(context):
class_name = type(context.axis_title.format.line).__name__
assert class_name == 'LineFormat', 'got %s' % class_name
@then('axis_title.has_text_frame is {value}')
def then_axis_title_has_text_frame_is_value(context, value):
actual_value = context.axis_title.has_text_frame
expected_value = {'True': True, 'False': False}[value]
assert actual_value is expected_value, 'got %s' % actual_value
@then('axis_title.text_frame is a TextFrame object')
def then_axis_title_text_frame_is_a_TextFrame_object(context):
class_name = type(context.axis_title.text_frame).__name__
assert class_name == 'TextFrame', 'got %s' % class_name
@then('gridlines.format is a ChartFormat object')
def then_gridlines_format_is_a_ChartFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format).__name__ == 'ChartFormat'
@then('gridlines.format.fill is a FillFormat object')
def then_gridlines_format_fill_is_a_FillFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format.fill).__name__ == 'FillFormat'
@then('gridlines.format.line is a LineFormat object')
def then_gridlines_format_line_is_a_LineFormat_object(context):
gridlines = context.gridlines
assert type(gridlines.format.line).__name__ == 'LineFormat'
@then('tick_labels.offset is {value}')
def then_tick_labels_offset_is_expected_value(context, value):
    actual = context.tick_labels.offset
    assert actual == int(value), (
        'got %s' % actual
    )
@then('value_axis.crosses is {member}')
def then_value_axis_crosses_is_value(context, member):
value_axis = context.value_axis
expected_value = getattr(XL_AXIS_CROSSES, member)
assert value_axis.crosses == expected_value, 'got %s' % value_axis.crosses
@then('value_axis.crosses_at is {value}')
def then_value_axis_crosses_at_is_value(context, value):
    # 'None' means no explicit crossing point; anything else is a float.
    expected = None if value == 'None' else float(value)
    actual = context.value_axis.crosses_at
    assert actual == expected, (
        'got %s' % actual
    )
|
|
from django import forms
from django.utils.safestring import mark_safe
from django.forms import TextInput
from datetimewidget.widgets import DateWidget
from .models import Pledge, FollowUp, Contribution, BulkPayment
from contacts.models import Person
class PledgeForm(forms.ModelForm):
    """Create/edit form for a Pledge bound to a specific Person."""
    # Rendered as a hidden input, so the bound person is supplied by the
    # page rather than chosen by the user.
    person = forms.ModelChoiceField(queryset=Person.objects.all(),
                                    widget=forms.HiddenInput())
    class Meta:
        model = Pledge
        # status_changed is excluded from user editing.
        exclude = ('status_changed',)
        widgets = {
            'amount': TextInput(attrs={'type': 'text'}),
            # Bootstrap-3 date picker; startView 2 opens on the month view.
            'payments_start_date': DateWidget(
                attrs={},
                options={'startView': 2, 'format': 'yyyy-mm-dd'},
                bootstrap_version=3
            ),
        }
class FollowUpForm(forms.ModelForm):
    """Create/edit form for a FollowUp on a pledge.

    Accepts an optional ``next_payment_date``; when provided, it is written
    back to the related pledge's ``payments_start_date`` on save.
    """
    # Rendered as a hidden input: the pledge is fixed by the calling page.
    pledge = forms.ModelChoiceField(queryset=Pledge.objects.all(),
                                    widget=forms.HiddenInput())
    next_payment_date = forms.DateField(
        required=False,
        widget=DateWidget(
            attrs={},
            options={'startView': 2, 'format': 'yyyy-mm-dd'},
            bootstrap_version=3
        ),
        label='Next expected payment date',
    )
    def __init__(self, next_payment_date, *args, **kwargs):
        """Seed the next_payment_date field with the caller's value."""
        super(FollowUpForm, self).__init__(*args, **kwargs)
        self.fields['next_payment_date'].initial = next_payment_date
    def save(self, commit=True):
        """Save the follow-up and, when committing, propagate any new
        next-payment date onto the related pledge.

        Fix: ``commit`` used to be silently ignored (``super().save()`` was
        always called with the default), so ``commit=False`` still wrote to
        the database.
        """
        instance = super(FollowUpForm, self).save(commit=commit)
        # If we defined a new date of next payment, save it to the pledge --
        # only when we are actually committing to the database.
        if commit and self.cleaned_data['next_payment_date']:
            instance.pledge.payments_start_date = \
                self.cleaned_data['next_payment_date']
            instance.pledge.save()
        return instance
    class Meta:
        model = FollowUp
        exclude = []
class NoInput(forms.Widget):
    """Widget that renders the bare value with no input element."""
    def render(self, name, value, attrs=None):
        # The value is emitted as-is (mark_safe: may contain HTML).
        return mark_safe(value)
class StaticField(forms.Field):
    """Read-only display field; any submitted value is discarded.

    Forms that swap a real field for a StaticField restore the original
    value from ``self.initial`` in their ``clean()`` (see ContributionForm
    and BulkPaymentForm below).
    """
    widget = NoInput
    def clean(self, value):
        # Deliberately return None: the POSTed value is ignored.
        return
class ContributionForm(forms.ModelForm):
    """Create/edit form for a Contribution.

    Fields that must not change after a receipt has been issued are swapped
    for read-only StaticField instances; their real values are restored
    from ``self.initial`` in ``clean()``.
    """
    def __init__(self, person, request=None, user=None, *args, **kwargs):
        super(ContributionForm, self).__init__(*args, **kwargs)
        # Pre-select the USA receipt type when the requesting user defaults
        # to it.
        if request and request.user.default_usa_receipt:
            self.fields['receipt_type'].initial = 'usa-receipt'
        # Only offer pledges belonging to the person being edited.
        self.fields['pledge'].queryset = Pledge.objects.filter(
            person=person)
        instance = getattr(self, 'instance', None)
        # Lock down financial fields once a serial number exists for a cash
        # contribution, unless the user may edit completed contributions.
        if instance and instance.pk and instance.serial_number \
                and instance.payment_method in ['cashl', 'cashf'] \
                and not user.has_perm('contributions.can_edit_completed'):
            self.fields['amount'] = StaticField()
            self.fields['currency'] = StaticField()
            self.fields['receipt_date'] = StaticField()
            self.fields['is_external'] = StaticField()
            if instance.status == 'completed':
                self.fields['status'] = StaticField()
                self.fields['cleared_on'] = StaticField()
                self.fields['payment_method'] = StaticField()
        if not self.is_bound:
            # Restrict relation querysets to the currently linked objects so
            # the rendered widgets stay small.
            if None not in (self.instance, self.instance.bulk_payment):
                self.fields['bulk_payment'].queryset = \
                    BulkPayment.objects.filter(
                        pk=self.instance.bulk_payment.pk)
            else:
                self.fields['bulk_payment'].queryset = \
                    BulkPayment.objects.none()
            if None not in (self.instance, self.instance.collector):
                # Fix: removed leftover debug print() of the queryset.
                self.fields['collector'].queryset = Person.objects.filter(
                    pk=self.instance.collector.pk)
            else:
                self.fields['collector'].queryset = Person.objects.none()
    def clean(self):
        """Restore the original values of read-only (Static) fields.

        StaticField.clean() returns None, so the real values are pulled
        back from ``self.initial`` before model validation/saving.
        """
        for name, field in self.fields.items():
            if isinstance(field, StaticField):
                self.cleaned_data.update({name: self.initial[name]})
        return self.cleaned_data
    class Meta:
        model = Contribution
        exclude = ('status_changed', 'deposited_status',
                   'deposited_status_changed')
        widgets = {
            'collector': forms.Select(attrs={'class': 'autocomplete'}),
            'amount': TextInput(attrs={'type': 'text'}),
            'foreign_amount': TextInput(attrs={'type': 'text'}),
            'dated': DateWidget(
                attrs={},
                options={
                    'startView': 2,
                    'format': 'yyyy-mm-dd',
                },
                bootstrap_version=3
            ),
            'receipt_date': DateWidget(
                attrs={},
                options={
                    'startView': 2,
                    'format': 'yyyy-mm-dd',
                },
                bootstrap_version=3
            ),
            'cleared_on': DateWidget(
                attrs={},
                options={
                    'startView': 2,
                    'format': 'yyyy-mm-dd',
                },
                bootstrap_version=3
            ),
        }
class BulkPaymentForm(forms.ModelForm):
    """Create/edit form for a BulkPayment tied to a specific Person.

    Mirrors ContributionForm's locking behavior: once a serial number
    exists, financial fields become read-only StaticFields whose values
    are restored from ``self.initial`` in ``clean()``.
    """
    # Rendered as a hidden input, so the bound person is supplied by the
    # page rather than chosen by the user.
    person = forms.ModelChoiceField(queryset=Person.objects.all(),
                                    widget=forms.HiddenInput())
    def __init__(self, person, *args, **kwargs):
        super(BulkPaymentForm, self).__init__(*args, **kwargs)
        instance = getattr(self, 'instance', None)
        # Lock down financial fields once a serial number has been issued.
        if instance and instance.pk and instance.serial_number:
            self.fields['amount'] = StaticField()
            self.fields['currency'] = StaticField()
            self.fields['receipt_date'] = StaticField()
            # Completed payments additionally freeze status-related fields.
            if instance.status == 'completed':
                self.fields['status'] = StaticField()
                self.fields['cleared_on'] = StaticField()
                self.fields['payment_method'] = StaticField()
    def clean(self):
        # StaticField.clean() returns None; restore the real values from
        # self.initial so model validation/saving sees them unchanged.
        for name, field in self.fields.items():
            if isinstance(field, StaticField):
                self.cleaned_data.update({name: self.initial[name]})
        return self.cleaned_data
    class Meta:
        model = BulkPayment
        exclude = ('status_changed',)
        widgets = {
            'amount': TextInput(attrs={'type': 'text'}),
            'dated': DateWidget(
                attrs={},
                options={
                    'startView': 2,
                    'format': 'yyyy-mm-dd',
                },
                bootstrap_version=3
            ),
            'receipt_date': DateWidget(
                attrs={},
                options={
                    'startView': 2,
                    'format': 'yyyy-mm-dd',
                },
                bootstrap_version=3
            ),
            'cleared_on': DateWidget(
                attrs={},
                options={
                    'startView': 2,
                    'format': 'yyyy-mm-dd',
                },
                bootstrap_version=3
            ),
        }
|
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
    """Initial schema for the seating_charts app.

    Creates the concrete models (Ethnicity, MealTime, PinnedStudent,
    SeatFiller, SeatingStudent, Table, TableAssignment, Layout) plus
    Historical* shadow tables that mirror each tracked model with
    history_id/history_date/history_type/history_user columns
    (presumably generated by a history-tracking app such as
    django-simple-history -- confirm before editing).
    NOTE(review): auto-generated migration; do not hand-edit operations.
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('academics', '0022_auto_20160203_1038'),
    ]
    operations = [
        # --- concrete models and their history shadow tables ---
        migrations.CreateModel(
            name='Ethnicity',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('ethnicity', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='HistoricalEthnicity',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('ethnicity', models.CharField(max_length=200)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical ethnicity',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalMealTime',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('name', models.CharField(max_length=200)),
                ('include_boarding_students', models.BooleanField(default=False)),
                ('include_day_students', models.BooleanField(default=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical meal time',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalPinnedStudent',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical pinned student',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalSeatFiller',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('description', models.CharField(blank=True, max_length=200)),
                ('seats', models.IntegerField()),
                ('display', models.BooleanField(default=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical seat filler',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalTable',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('description', models.CharField(max_length=200)),
                ('capacity', models.IntegerField()),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical table',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='HistoricalTableAssignment',
            fields=[
                ('id', models.IntegerField(verbose_name='ID', db_index=True, blank=True, auto_created=True)),
                ('waitor', models.BooleanField(default=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'historical table assignment',
                'ordering': ('-history_date', '-history_id'),
                'get_latest_by': 'history_date',
            },
        ),
        migrations.CreateModel(
            name='Layout',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=25)),
            ],
        ),
        migrations.CreateModel(
            name='MealTime',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('name', models.CharField(max_length=200)),
                ('include_boarding_students', models.BooleanField(default=False)),
                ('include_day_students', models.BooleanField(default=False)),
                ('include_grades', models.ManyToManyField(to='academics.Grade')),
            ],
        ),
        migrations.CreateModel(
            name='PinnedStudent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('meal_time', models.ForeignKey(to='seating_charts.MealTime')),
            ],
        ),
        migrations.CreateModel(
            name='SeatFiller',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('description', models.CharField(blank=True, max_length=200)),
                ('seats', models.IntegerField()),
                ('display', models.BooleanField(default=False)),
                ('meal_time', models.ManyToManyField(to='seating_charts.MealTime')),
            ],
        ),
        migrations.CreateModel(
            name='SeatingStudent',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('enrollment', models.ForeignKey(to='academics.Enrollment')),
                ('ethnicity', models.ForeignKey(null=True, to='seating_charts.Ethnicity')),
            ],
        ),
        migrations.CreateModel(
            name='Table',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('description', models.CharField(max_length=200)),
                ('capacity', models.IntegerField()),
                ('for_meals', models.ManyToManyField(to='seating_charts.MealTime')),
            ],
        ),
        migrations.CreateModel(
            name='TableAssignment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True, serialize=False)),
                ('waitor', models.BooleanField(default=False)),
                ('meal_time', models.ForeignKey(to='seating_charts.MealTime')),
                ('student', models.ForeignKey(to='seating_charts.SeatingStudent')),
                ('table', models.ForeignKey(to='seating_charts.Table')),
            ],
            options={
                'permissions': (('view', 'Can view table assignments'), ('edit', 'Can edit table assignments')),
            },
        ),
        # --- foreign keys added after model creation (auto-generated
        #     ordering; presumably to satisfy forward references) ---
        migrations.AddField(
            model_name='seatfiller',
            name='table',
            field=models.ForeignKey(to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='pinnedstudent',
            name='student',
            field=models.ForeignKey(to='seating_charts.SeatingStudent'),
        ),
        migrations.AddField(
            model_name='pinnedstudent',
            name='table',
            field=models.ForeignKey(to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='layout',
            name='left_print',
            field=models.ForeignKey(related_name='+', to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='layout',
            name='right_print',
            field=models.ForeignKey(null=True, related_name='+', blank=True, to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='historicaltableassignment',
            name='meal_time',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='historicaltableassignment',
            name='student',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.SeatingStudent'),
        ),
        migrations.AddField(
            model_name='historicaltableassignment',
            name='table',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='historicalseatfiller',
            name='table',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
        ),
        migrations.AddField(
            model_name='historicalpinnedstudent',
            name='meal_time',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.MealTime'),
        ),
        migrations.AddField(
            model_name='historicalpinnedstudent',
            name='student',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.SeatingStudent'),
        ),
        migrations.AddField(
            model_name='historicalpinnedstudent',
            name='table',
            field=models.ForeignKey(null=True, db_constraint=False, related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, blank=True, to='seating_charts.Table'),
        ),
        # --- uniqueness constraints ---
        migrations.AlterUniqueTogether(
            name='tableassignment',
            unique_together=set([('meal_time', 'student')]),
        ),
        migrations.AlterUniqueTogether(
            name='pinnedstudent',
            unique_together=set([('student', 'meal_time')]),
        ),
    ]
|
|
##
# Copyright (c) 2007-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
import os
import re
import sys
import base64
import itertools
from subprocess import Popen, PIPE, STDOUT
from hashlib import md5, sha1
from twisted.internet import reactor
from twisted.web import client
from twisted.python import failure
from twext.python.log import Logger
log = Logger()
from twext.internet.gaiendpoint import GAIEndpoint
from twext.internet.adaptendpoint import connect
from twext.internet.ssl import simpleClientContextFactory
##
# System Resources (Memory size and processor count)
##
# Attempt to load ctypes so we can query the OS directly for processor count
# and physical memory size; degrade gracefully when ctypes is unavailable.
try:
    from ctypes import (
        cdll,
        c_int, c_uint64, c_ulong,
        c_char_p, c_void_p,
        addressof, sizeof, c_size_t,
    )
    from ctypes.util import find_library
    hasCtypes = True
except ImportError:
    hasCtypes = False
if (
    sys.platform == "darwin" or
    sys.platform.startswith("freebsd")
) and hasCtypes:
    # Darwin / FreeBSD: ask the kernel via sysctlbyname(3).
    libc = cdll.LoadLibrary(find_library("libc"))
    def getNCPU():
        """
        Returns the number of processors detected
        """
        ncpu = c_int(0)
        size = c_size_t(sizeof(ncpu))
        libc.sysctlbyname.argtypes = [
            c_char_p, c_void_p, c_void_p, c_void_p, c_ulong
        ]
        # sysctlbyname writes the value into ncpu and the value size into size.
        libc.sysctlbyname(
            "hw.ncpu",
            c_void_p(addressof(ncpu)),
            c_void_p(addressof(size)),
            None,
            0
        )
        return int(ncpu.value)
    def getMemorySize():
        """
        Returns the physical amount of RAM installed, in bytes
        """
        memsize = c_uint64(0)
        size = c_size_t(sizeof(memsize))
        libc.sysctlbyname.argtypes = [
            c_char_p, c_void_p, c_void_p, c_void_p, c_ulong
        ]
        # NOTE(review): "hw.memsize" is the Darwin name; confirm it exists on
        # the FreeBSD versions this is expected to run on.
        libc.sysctlbyname(
            "hw.memsize",
            c_void_p(addressof(memsize)),
            c_void_p(addressof(size)),
            None,
            0
        )
        return int(memsize.value)
elif sys.platform == "linux2" and hasCtypes:
    # Linux. NOTE(review): Python 2 reports "linux2" but Python 3 reports
    # "linux", so this branch is skipped on Python 3 — confirm intent.
    libc = cdll.LoadLibrary(find_library("libc"))
    def getNCPU():
        # glibc get_nprocs(3): number of processors currently available.
        return libc.get_nprocs()
    def getMemorySize():
        # page size * number of physical pages = installed RAM in bytes.
        return libc.getpagesize() * libc.get_phys_pages()
else:
    # Unsupported platform (or ctypes missing): fail loudly when called.
    def getNCPU():
        if not hasCtypes:
            msg = " without ctypes"
        else:
            msg = ""
        raise NotImplementedError("getNCPU not supported on %s%s" % (sys.platform, msg))
    def getMemorySize():
        raise NotImplementedError("getMemorySize not yet supported on %s" % (sys.platform))
def computeProcessCount(minimum, perCPU, perGB, cpuCount=None, memSize=None):
    """
    Determine how many processes to spawn based on installed RAM and CPUs,
    returning at least "minimum".

    @param minimum: the smallest count ever returned
    @param perCPU: processes to allow per detected CPU
    @param perGB: processes to allow per GiB of detected RAM
    @param cpuCount: CPU count override (None: detect via getNCPU)
    @param memSize: RAM size override in bytes (None: detect via getMemorySize)
    @return: the computed process count
    """
    if cpuCount is None:
        try:
            cpuCount = getNCPU()
        # "except X as e" is valid on Python 2.6+ and required on Python 3;
        # the old "except X, e" form was Python-2-only.
        except NotImplementedError as e:
            log.error("Unable to detect number of CPUs: {ex}", ex=str(e))
            return minimum
    if memSize is None:
        try:
            memSize = getMemorySize()
        except NotImplementedError as e:
            log.error("Unable to detect amount of installed RAM: {ex}", ex=str(e))
            return minimum
    countByCore = perCPU * cpuCount
    countByMemory = perGB * (memSize / (1024 * 1024 * 1024))
    # Pick the smaller of the two:
    count = min(countByCore, countByMemory)
    # ...but at least "minimum"
    return max(count, minimum)
##
# Module management
##
def submodule(module, name):
    """
    Import and return the submodule C{name} of C{module}.

    @param module: an already-imported module object
    @param name: the (dotted-free) submodule name to import
    @return: the submodule object
    @raise ImportError: if the submodule cannot be imported
    """
    fullname = module.__name__ + "." + name
    try:
        submodule = __import__(fullname)
    # "as e" form works on Python 2.6+ and 3.x; "except X, e" was py2-only.
    except ImportError as e:
        raise ImportError("Unable to import submodule %s from module %s: %s" % (name, module, e))
    # __import__ returns the top-level package; walk down to the leaf.
    for m in fullname.split(".")[1:]:
        submodule = getattr(submodule, m)
    return submodule
##
# Tracebacks
##
from twisted.python.failure import Failure
def printTracebacks(f):
    """
    Decorator: print a traceback for any exception escaping C{f}, then
    re-raise it unchanged.
    """
    # Local import keeps this file's top-of-module import block untouched.
    from functools import wraps

    @wraps(f)  # preserve f's name/docstring on the wrapper (was lost before)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except:
            # Deliberately broad: log every failure, but always re-raise.
            Failure().printTraceback()
            raise
    return wrapper
##
# Helpers
##
class Alternator (object):
    """
    Object that alternates between True and False states.
    """
    def __init__(self, state=False):
        # Coerce to a real bool; this is the value the next state() returns.
        self._flag = bool(state)
    def state(self):
        """
        @return: the current state
        """
        # Return the current value while flipping the stored one.
        current, self._flag = self._flag, not self._flag
        return current
def utf8String(s):
    """
    Encode a unicode value as UTF-8 bytes; any other value is returned
    unchanged.  (Relies on the Python 2 C{unicode} builtin.)
    """
    return s.encode("utf-8") if isinstance(s, unicode) else s
##
# Keychain access
##
class KeychainPasswordNotFound(Exception):
    """
    Exception raised when the password does not exist in the keychain
    (or the keychain lookup reports a failure).
    """
class KeychainAccessError(Exception):
    """
    Exception raised when the keychain cannot be accessed at all
    (e.g. the 'security' utility is missing).
    """
# Matches the password line printed by `security find-generic-password -g`,
# e.g.: password: "secret"
passwordRegExp = re.compile(r'password: "(.*)"')
def getPasswordFromKeychain(account):
    """
    Look up the generic password for C{account} using the OS X
    C{/usr/bin/security} command-line tool.

    @param account: the keychain account name to look up
    @return: the password string
    @raise KeychainAccessError: if the 'security' tool is not present
    @raise KeychainPasswordNotFound: if no password exists for the account
    """
    # Guard clause: without the helper binary we cannot touch the keychain.
    if not os.path.isfile("/usr/bin/security"):
        raise KeychainAccessError("Keychain access utility ('security') not found")
    child = Popen(
        args=[
            "/usr/bin/security", "find-generic-password",
            "-a", account, "-g",
        ],
        stdout=PIPE, stderr=STDOUT,
    )
    output, error = child.communicate()
    # Non-zero exit means the keychain has no entry for this account.
    if child.returncode:
        raise KeychainPasswordNotFound(error)
    match = passwordRegExp.search(output)
    if match:
        return match.group(1)
    raise KeychainPasswordNotFound(
        "Password for %s not found in keychain" % (account,)
    )
##
# Digest/Basic-capable HTTP GET factory
##
# Supported digest hash constructors (RFC 2617); "md5-sess" reuses md5 but
# additionally folds the nonce/cnonce into HA1 (see calcHA1 below).
algorithms = {
    'md5': md5,
    'md5-sess': md5,
    'sha': sha1,
}
# DigestCalcHA1
def calcHA1(
    pszAlg,
    pszUserName,
    pszRealm,
    pszPassword,
    pszNonce,
    pszCNonce,
    preHA1=None
):
    """
    Compute the HTTP Digest HA1 value (RFC 2617).

    @param pszAlg: The name of the algorithm to use to calculate the digest.
        Currently supported are md5 md5-sess and sha.
    @param pszUserName: The username
    @param pszRealm: The realm
    @param pszPassword: The password
    @param pszNonce: The nonce
    @param pszCNonce: The cnonce
    @param preHA1: If available this is a str containing a previously
        calculated HA1 as a hex string. If this is given then the values for
        pszUserName, pszRealm, and pszPassword are ignored.
    @return: HA1 as a hex-encoded str.
    """
    if (preHA1 and (pszUserName or pszRealm or pszPassword)):
        raise TypeError(("preHA1 is incompatible with the pszUserName, "
                        "pszRealm, and pszPassword arguments"))
    if preHA1 is None:
        # We need to calculate the HA1 from the username:realm:password
        m = algorithms[pszAlg]()
        m.update(pszUserName)
        m.update(":")
        m.update(pszRealm)
        m.update(":")
        m.update(pszPassword)
        HA1 = m.digest()
    else:
        # We were given a previously calculated HA1 as a hex string;
        # convert it back to raw digest bytes.
        HA1 = preHA1.decode('hex')
    if pszAlg == "md5-sess":
        # md5-sess: session HA1 = H(HA1:nonce:cnonce)
        m = algorithms[pszAlg]()
        m.update(HA1)
        m.update(":")
        m.update(pszNonce)
        m.update(":")
        m.update(pszCNonce)
        HA1 = m.digest()
    # NOTE: str.encode('hex') / .decode('hex') are Python-2-only codecs.
    return HA1.encode('hex')
# DigestCalcResponse
def calcResponse(
    HA1,
    algo,
    pszNonce,
    pszNonceCount,
    pszCNonce,
    pszQop,
    pszMethod,
    pszDigestUri,
    pszHEntity,
):
    """
    Compute the HTTP Digest "response" value (RFC 2617 section 3.2.2.1).

    @param HA1: hex-encoded HA1 (see calcHA1)
    @param algo: algorithm name; a key of C{algorithms}
    @param pszNonce: server nonce
    @param pszNonceCount: nonce count (nc); may be empty/None
    @param pszCNonce: client nonce; may be empty/None
    @param pszQop: quality of protection ("auth", "auth-int", or empty)
    @param pszMethod: HTTP request method
    @param pszDigestUri: request URI
    @param pszHEntity: hash of the entity body (used only for qop=auth-int)
    @return: the hex-encoded response hash
    """
    # HA2 = H(method:uri[:H(entity)])  -- entity hash only for qop=auth-int.
    m = algorithms[algo]()
    m.update(pszMethod)
    m.update(":")
    m.update(pszDigestUri)
    if pszQop == "auth-int":
        m.update(":")
        m.update(pszHEntity)
    HA2 = m.digest().encode('hex')
    # response = H(HA1:nonce[:nc:cnonce:qop]:HA2); the middle section is
    # included only when all three qop-related values are present.
    m = algorithms[algo]()
    m.update(HA1)
    m.update(":")
    m.update(pszNonce)
    m.update(":")
    if pszNonceCount and pszCNonce and pszQop:
        m.update(pszNonceCount)
        m.update(":")
        m.update(pszCNonce)
        m.update(":")
        m.update(pszQop)
        m.update(":")
    m.update(HA2)
    respHash = m.digest().encode('hex')
    return respHash
class Unauthorized(Exception):
    """
    Raised when the calendar server rejects our request with HTTP 401 and
    authentication cannot be (re)attempted.
    """
    pass
class AuthorizedHTTPGetter(client.HTTPPageGetter):
    """
    HTTPPageGetter that transparently retries a request once with Digest
    (preferred) or Basic credentials after the server answers 401.

    Expects its factory to carry: C{username}, C{password}, C{method},
    C{url}, C{scheme}, C{host}, C{port} and a C{headers} dict; the final
    result is delivered through C{factory.deferred}.
    """
    log = Logger()
    def handleStatus_401(self):
        # Drop the current connection quietly; we reconnect with credentials.
        self.quietLoss = 1
        self.transport.loseConnection()
        # No credentials configured at all: fail immediately.
        if not hasattr(self.factory, "username"):
            self.factory.deferred.errback(failure.Failure(Unauthorized("Authentication required")))
            return self.factory.deferred
        # Second 401 in a row: the credentials are wrong; give up.
        if hasattr(self.factory, "retried"):
            self.factory.deferred.errback(failure.Failure(Unauthorized("Could not authenticate user %s with calendar server" % (self.factory.username,))))
            return self.factory.deferred
        self.factory.retried = True
        # self.log.debug("Got a 401 trying to inject [{hdrs}]", hdrs=self.headers)
        details = {}
        basicAvailable = digestAvailable = False
        wwwauth = self.headers.get("www-authenticate")
        # Determine which schemes the server offers; for digest, keep the
        # challenge text (everything after "digest ") for parsing below.
        for item in wwwauth:
            if item.startswith("basic "):
                basicAvailable = True
            if item.startswith("digest "):
                digestAvailable = True
                wwwauth = item[7:]
        def unq(s):
            # Strip one pair of surrounding double quotes, if present.
            if s[0] == s[-1] == '"':
                return s[1:-1]
            return s
        # Parse the digest challenge into a parameter dict.
        # NOTE(review): a plain split(',') breaks if a quoted value itself
        # contains a comma — presumably servers here never send one; verify.
        parts = wwwauth.split(',')
        for (k, v) in [p.split('=', 1) for p in parts]:
            details[k.strip()] = unq(v.strip())
        user = self.factory.username
        pswd = self.factory.password
        if digestAvailable and details:
            # Build the RFC 2617 digest response from the challenge values.
            digest = calcResponse(
                calcHA1(
                    details.get('algorithm'),
                    user,
                    details.get('realm'),
                    pswd,
                    details.get('nonce'),
                    details.get('cnonce')
                ),
                details.get('algorithm'),
                details.get('nonce'),
                details.get('nc'),
                details.get('cnonce'),
                details.get('qop'),
                self.factory.method,
                self.factory.url,
                None
            )
            # The Authorization header format differs depending on whether
            # the server specified a qop directive.
            if details.get('qop'):
                response = (
                    'Digest username="%s", realm="%s", nonce="%s", uri="%s", '
                    'response=%s, algorithm=%s, cnonce="%s", qop=%s, nc=%s' %
                    (
                        user,
                        details.get('realm'),
                        details.get('nonce'),
                        self.factory.url,
                        digest,
                        details.get('algorithm'),
                        details.get('cnonce'),
                        details.get('qop'),
                        details.get('nc'),
                    )
                )
            else:
                response = (
                    'Digest username="%s", realm="%s", nonce="%s", uri="%s", '
                    'response=%s, algorithm=%s' %
                    (
                        user,
                        details.get('realm'),
                        details.get('nonce'),
                        self.factory.url,
                        digest,
                        details.get('algorithm'),
                    )
                )
            self.factory.headers['Authorization'] = response
            # Reconnect (TLS or plain) and let the factory re-issue the
            # request, now carrying the Authorization header.
            if self.factory.scheme == 'https':
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port, simpleClientContextFactory(self.factory.host)),
                    self.factory)
            else:
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port),
                    self.factory)
            # self.log.debug("Retrying with digest after 401")
            return self.factory.deferred
        elif basicAvailable:
            # Fall back to Basic: base64("user:password"), newlines removed
            # (encodestring appends them).
            basicauth = "%s:%s" % (user, pswd)
            basicauth = "Basic " + base64.encodestring(basicauth)
            basicauth = basicauth.replace("\n", "")
            self.factory.headers['Authorization'] = basicauth
            if self.factory.scheme == 'https':
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port, simpleClientContextFactory(self.factory.host)),
                    self.factory)
            else:
                connect(
                    GAIEndpoint(reactor, self.factory.host, self.factory.port),
                    self.factory)
            # self.log.debug("Retrying with basic after 401")
            return self.factory.deferred
        else:
            # Server offered no scheme we can speak.
            self.factory.deferred.errback(failure.Failure(Unauthorized("Mail gateway not able to process reply; calendar server returned 401 and doesn't support basic or digest")))
            return self.factory.deferred
def bestAcceptType(accepts, allowedTypes):
"""
Given a set of Accept headers and the set of types the server can return, determine the best choice
of format to return to the client.
@param accepts: parsed accept headers
@type accepts: C{dict}
@param allowedTypes: list of allowed types in server preferred order
@type allowedTypes: C{list}
"""
# If no Accept present just use the first allowed type - the server's preference
if not accepts:
return allowedTypes[0]
# Get mapping for ordered top-level types for use in subtype wildcard match
toptypes = {}
for allowed in allowedTypes:
mediaType = allowed.split("/")[0]
if mediaType not in toptypes:
toptypes[mediaType] = allowed
result = None
result_qval = 0.0
for content_type, qval in accepts.items():
# Exact match
ctype = "%s/%s" % (content_type.mediaType, content_type.mediaSubtype,)
if ctype in allowedTypes:
if qval > result_qval:
result = ctype
result_qval = qval
# Subtype wildcard match
elif content_type.mediaType != "*" and content_type.mediaSubtype == "*":
if content_type.mediaType in toptypes:
if qval > result_qval:
result = toptypes[content_type.mediaType]
result_qval = qval
# Full wildcard match
elif content_type.mediaType == "*" and content_type.mediaSubtype == "*":
if qval > result_qval:
result = allowedTypes[0]
result_qval = qval
return result
def userAgentProductTokens(user_agent):
    """
    Parse an HTTP User-Agent header to extract the product tokens and ignore
    any parenthesized comment strings in the header.
    @param user_agent: text of User-Agent header value
    @type user_agent: L{str}
    @return: list of product tokens extracted from the header
    @rtype: L{list}
    """
    tokens = []
    in_comment = False
    for piece in user_agent.split():
        if in_comment:
            # Inside a "(...)" comment: wait for the closing piece.
            in_comment = not piece.endswith(")")
        elif piece.startswith("("):
            # Comment start; a one-piece "(x)" comment opens and closes here.
            in_comment = not piece.endswith(")")
        else:
            tokens.append(piece)
    return tokens
def matchClientFixes(config, user_agent):
    """
    Given a user-agent string, see if it matches any of the configured client fixes.
    @param config: the L{config} to match against.
    @type config: L{ConfigDict}
    @param user_agent: the HTTP User-Agent header value to test.
    @type user_agent: L{str}
    """
    # Nothing configured or no header: no fixes apply.
    if not user_agent or len(config.ClientFixesCompiled) == 0:
        return set()
    tokens = userAgentProductTokens(user_agent)
    matched = set()
    for fix, patterns in config.ClientFixesCompiled.items():
        # A fix applies as soon as any pattern matches any product token.
        if any(
            pattern.match(token) is not None
            for pattern, token in itertools.product(patterns, tokens)
        ):
            matched.add(fix)
    return matched
|
|
# ----- Copyright (c) 2014 Datacratic. All rights reserved.
"""
This module extends tornado's handler classes, implementing GET and POST
request handlers that respond to bid requests.
A basic openRtb class helps to interpret requests and prepare responses
Tornado request handlers are extended to handle openRtb, and a
FixedPriceBidder MixIn is used to calculate the bids.
Replacing the Mixin by a smarter strategy using the same HTTP handlers
will create a new bid agent.
There are 2 tornado apps listening at win and event ports
playing the role of a dummy ad server. No action is taken on the events though!
To improve response time a tornado http server is used to spawn extra
processes to deal with a larger volume of requests.
This is a simplistic implementation and should not be expected to
perform under high load, as it was only tested at a few kqps.
Currently the average response time for bids is around 14 to 21 ms.
"""
# NOTE: This example is not integrated as part of the rtbkit build system
# as it is an example of how to build a bidder that uses only HTTP interface
# Therefore, TornadoWeb must be manually installed to have this script working
# To do so, execute the following command from your shell:
# $> pip install tornado
# this will install tornado system wide
# to have tornado installed only at the user level do:
# $> pip install --user tornado
__version__ = "0.1"
__all__ = ["OpenRTBResponse",
"FixedPriceBidderMixIn",
"TornadoDummyRequestHandler",
"TornadoBaseBidAgentRequestHandler",
"TornadoFixPriceBidAgentRequestHandler",
"BudgetControl"]
# IMPORTS
# util libs
from copy import deepcopy
import json
import random
# tornado web
from tornado import process
from tornado import netutil
from tornado import httpserver
from tornado.web import RequestHandler, Application, url
from tornado.ioloop import IOLoop
from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import PeriodicCallback
# IMPLEMENTATION
# helper function reads the global config obj from file
def read_config(configFile):
    """Read the JSON configuration file at ``configFile`` and return the
    parsed contents.

    The file handle is now closed deterministically via ``with`` (the
    original left it open until garbage collection).
    """
    with open(configFile) as cfg:
        return json.load(cfg)
# This global configuration is read once at import time and used by the
# bidder classes to configure themselves, because tornado instantiates the
# request handler classes itself, which makes it hard to pass the config
# in as an argument.
CONFIGURATION_FILE = "../http_config.json"
# NOTE(review): the path is relative to the process working directory, not
# to this file — confirm the expected launch directory.
CONFIG_OBJ = read_config(CONFIGURATION_FILE)
# ----- minimalistic OpenRTB response message class
class OpenRTBResponse():
    """Helper class that builds minimal OpenRTB JSON response objects."""
    # field names - constants to avoid magic strings in the methods below
    key_id = "id"
    key_bid = "bid"
    key_crid = "crid"
    key_ext = "ext"
    key_extid = "external-id"
    key_ext_creatives = "creative-indexes"
    key_priority = "priority"
    key_impid = "impid"
    key_price = "price"
    key_seatbid = "seatbid"
    # template objects (class-level: always deepcopy before mutating)
    bid_object = {key_id: "1",
                  key_impid: "1",
                  key_price: 1.0,
                  key_crid: "",
                  key_ext: {key_priority: 1.0}}
    seat_bid_object = {key_bid: [deepcopy(bid_object)]}
    bid_response_object = {key_id: "1",
                           key_seatbid: [deepcopy(seat_bid_object)]}
    def get_empty_response(self):
        """Return a response scaffold containing only default values."""
        empty_resp = deepcopy(self.bid_response_object)
        return empty_resp
    def get_default_response(self, req):
        """Return a response scaffold with one bid per impression of ``req``,
        pre-filled from the request; None if the request does not validate.

        NOTE: as an example we bid on ALL impressions; a real bidder would
        normally be selective.
        """
        if not self.validate_req(req):
            return None
        default_resp = deepcopy(self.bid_response_object)
        # copy request id
        default_resp[self.key_id] = req[self.key_id]
        # empty the bid list (we assume only one seat bid for simplicity)
        bid_list = []
        default_resp[self.key_seatbid][0][self.key_bid] = bid_list
        id_counter = 0
        for imp in req["imp"]:
            # BUGFIX: build a fresh bid per impression. The original reused
            # one mutable bid object across iterations, so a stale
            # "external-id" from a previous impression leaked into bids whose
            # own impression had none.
            new_bid = deepcopy(self.bid_object)
            # dumb bid id, just a counter
            id_counter = id_counter + 1
            new_bid[self.key_id] = str(id_counter)
            # copy this impression's id as the bid's impid
            new_bid[self.key_impid] = imp[self.key_id]
            try:
                # copy the first external id into the bid, when present
                new_bid[self.key_ext][self.key_extid] = \
                    imp[self.key_ext]["external-ids"][0]
            except (KeyError, IndexError, TypeError):
                # no external id on this impression: omit the field
                # (narrowed from a bare except that hid unrelated errors)
                pass
            # keep the default price; the bidder overwrites it later
            bid_list.append(new_bid)
        return default_resp
    def validate_req(self, req):
        """Validate the request fields (dummy implementation: always True)."""
        # not implemented yet. should check if the structure of the request
        # conforms to the spec; for now everything is assumed fine
        valid = True
        return valid
# ----- simplistic fixed price bidder MixIn class,
# has to be mixed into a request handler
class FixedPriceBidderMixIn():
    """Dumb bid agent MixIn that bids on every request at $1"""
    # MixIns do not have their __init__ (constructor) called,
    # so this class does not define one; loading the configuration
    # has to be dealt with by the class that incorporates it!!!
    bid_config = None  # populated by do_config(); None until configured
    openRtb = OpenRTBResponse()  # shared helper for building responses
    def do_config(self, cfgObj):
        # cfgObj is the "ACS" -> "Config" section of the global config file.
        self.bid_config = {}
        self.bid_config["probability"] = cfgObj["bidProbability"]
        self.bid_config["price"] = 1.0
        self.bid_config["creatives"] = cfgObj["creatives"]
    def do_bid(self, bid_req):
        # -------------------
        # bid logic:
        # since this is a fixed price bidder,
        # just mapping the request to the response
        # and using the default price ($1) will do the work.
        # -------------------
        # assemble default response (one bid per impression)
        resp = self.openRtb.get_default_response(bid_req)
        # ---
        # update each bid with price and creatives
        # ---
        # first we need to build a dictionary
        # that correlates impressions from the request
        # to creative lists
        # FORMAT: dict[extId][impId] = [creat1..creatN]
        impDict = {}
        impList = bid_req["imp"]
        for imp in impList:
            # list of external ids from this impression
            extIdsList = imp[OpenRTBResponse.key_ext]["external-ids"]
            for extId in extIdsList:
                tempDict = {}
                creatives = imp[OpenRTBResponse.key_ext][OpenRTBResponse.key_ext_creatives]
                impId = imp[OpenRTBResponse.key_id]
                # creative indexes are keyed by the external id as a string
                tempDict[impId] = creatives[str(extId)]
                impDict[extId] = deepcopy(tempDict)
        # then we iterate over all bids and choose a random creative for each
        # NOTE: only the first seatbid is handled, for simplicity's sake
        ref2seatbid0 = resp[OpenRTBResponse.key_seatbid][0]
        for bid in ref2seatbid0[OpenRTBResponse.key_bid]:
            # update bid price
            bid[OpenRTBResponse.key_price] = self.bid_config["price"]
            # get the list of creatives from the ext field in the request
            extId = bid[OpenRTBResponse.key_ext][OpenRTBResponse.key_extid]
            impId = bid[OpenRTBResponse.key_impid]
            creativeList = impDict[extId][impId]
            # pick one of the eligible creative indexes randomly
            creatNdx = random.choice(creativeList)
            # look up the creative id in the configured creatives
            creativeId = str(self.bid_config["creatives"][creatNdx]["id"])
            # set the creative id on the bid
            bid[OpenRTBResponse.key_crid] = creativeId
        return resp
# ----- this dummy handler always answers HTTP 200 to adserver events
# no further action is taken on the events received
class TornadoDummyRequestHandler(RequestHandler):
    """Stub ad-server endpoint: acknowledge every request with HTTP 200
    and an empty body; no further action is taken on the events."""
    def _ack(self):
        # Both verbs share the same no-op acknowledgement.
        self.set_status(200)
        self.write("")
    def post(self):
        self._ack()
    def get(self):
        self._ack()
# ----- tornado request handler class extend
# this class is a general bid Agent hadler.
# bid processing must be implemented in a derived class
class TornadoBaseBidAgentRequestHandler(RequestHandler):
    """Generic OpenRTB bid-agent HTTP handler for tornado.

    Subclasses provide the actual bidding logic in process_bid(); both GET
    and POST are routed through the same request processing.
    """
    def post(self):
        self.write(self.process_req())
    def get(self):
        self.write(self.process_req())
    def process_req(self):
        """Decode the JSON bid request, delegate to process_bid() and
        serialize the result; answers 204 with an error body on failure."""
        parsed = None
        if self.request.headers["Content-Type"].startswith("application/json"):
            parsed = json.loads(self.request.body)
        if parsed is None:
            # request was not JSON: no bid
            self.set_status(204)
            return "Error\n"
        bid_response = self.process_bid(parsed)
        if bid_response is None:
            # bidder produced no response: no bid
            self.set_status(204)
            return "Error\n"
        self.set_status(200)
        self.set_header("Content-type", "application/json")
        self.set_header("x-openrtb-version", "2.1")
        return json.dumps(bid_response)
    def process_bid(self, req):
        """---TBD in subclass--- return a response object, or None."""
        return None
# ----- minimal fixed price bid agent implementation.
# just extends base request handler class and mix in fix price strategy
class TornadoFixPriceBidAgentRequestHandler(TornadoBaseBidAgentRequestHandler,
                                            FixedPriceBidderMixIn):
    """Concrete bid agent: HTTP handling comes from
    TornadoBaseBidAgentRequestHandler and the bidding logic from the
    FixedPriceBidderMixIn (the original docstring wrongly claimed the logic
    was passed to the constructor)."""
    def __init__(self, application, request, **kwargs):
        """Run the normal handler initialization, then the MixIn's
        do_config() once per handler class."""
        # BUGFIX: super() must name THIS class, not its parent; passing the
        # parent class skips that parent in the MRO.
        super(TornadoFixPriceBidAgentRequestHandler, self).__init__(
            application, request, **kwargs)
        if (self.bid_config is None):
            # due to the way tornado instantiates handlers,
            # we have to use this module-level global for configuration
            self.do_config(CONFIG_OBJ["ACS"]["Config"])
    def process_bid(self, req):
        """process bid request by calling bidder mixin do_bid() method"""
        resp = self.do_bid(req)
        return resp
# ----- callback function used by the budget pacer
def handle_async_request(response):
    """ this callback function handles the response from
    the AsyncHTTPClient call to the banker; it only logs the outcome"""
    if response.error:
        print ("Request Error!")
    else:
        print ("Request response OK")
        # NOTE: Python 2 print statement (this module predates Python 3)
        print response.body
# ----- Budget allocation class: tops up the budget for the bid agent account
class BudgetControl(object):
    """Sends REST requests to the banker to pace the agent's budget."""
    def start(self, cfgObj):
        """Configure the pacer from the global config object, register the
        agent with the ACS, and issue the first budget top-up request."""
        # amount posted on every pacing tick, in USD micro-units per the
        # banker's "USD/1M" convention
        self.body = '{"USD/1M": ' + str(cfgObj["Banker"]["Budget"]) + '}'
        self.headers = {"Accept": "application/json"}
        self.url = "http://" + cfgObj["Banker"]["Ip"]
        self.url = self.url + ":" + str(cfgObj["Banker"]["Port"])
        acc = cfgObj["ACS"]["Config"]["account"]
        # the account is a two-element list addressed by the banker as "a:b"
        self.url = self.url + "/v1/accounts/" + acc[0]+":"+acc[1] + "/balance"
        self.http_client = AsyncHTTPClient()
        # register with ACS (requires self.http_client/self.headers above)
        self.acs_register(cfgObj["ACS"])
        # call the first budget pace request
        self.http_request()
    def http_request(self):
        """Called periodically (PeriodicCallback in tornado_bidder_run) to
        top up the account balance; failures are printed and ignored."""
        try:
            print("Budgeting: " + self.body)
            self.http_client.fetch(self.url, callback=handle_async_request, method='POST', headers=self.headers, body=self.body)
        except:
            # best-effort: pacing is simply retried on the next tick
            print("pacing - Failed!")
    def acs_register(self, cfgObj):
        """Calls the Agent Configuration Server to set up this agent.
        Must run after start() has set self.http_client and self.headers."""
        url = "http://" + cfgObj["Ip"]
        url = url + ":" + str(cfgObj["Port"])
        url = url + "/v1/agents/my_http_config/config"
        data = json.dumps(cfgObj["Config"])
        # send request to ACS
        try:
            print("ACS reg'ing: " + data)
            self.http_client.fetch(url, callback=handle_async_request, method='POST', headers=self.headers, body=data)
        except:
            # best-effort: a registration failure is only reported
            print("ACS registration failed")
# ----- test function
def tornado_bidder_run():
    """runs the httpapi bidder agent"""
    # bind tcp port to launch processes on requests
    sockets = netutil.bind_sockets(CONFIG_OBJ["Bidder"]["Port"])
    # fork worker processes (0 lets tornado pick the count)
    process.fork_processes(0)
    # Tornado app implementation
    app = Application([url(r"/", TornadoFixPriceBidAgentRequestHandler)])
    # start http servers and attach the web app to it
    server = httpserver.HTTPServer(app)
    server.add_sockets(sockets)
    # perform the following actions in only one process
    # NOTE(review): the original comment said "parent process", but
    # process.task_id() == 0 identifies the first forked worker — confirm.
    process_counter = process.task_id()
    if (process_counter == 0):
        # run dummy ad server endpoints for win and event notifications
        adserver_win = Application([url(r"/", TornadoDummyRequestHandler)])
        winport = CONFIG_OBJ["Bidder"]["Win"]
        adserver_win.listen(winport)
        adserver_evt = Application([url(r"/", TornadoDummyRequestHandler)])
        evtport = CONFIG_OBJ["Bidder"]["Event"]
        adserver_evt.listen(evtport)
        # --instantiate budget pacer
        pacer = BudgetControl()
        pacer.start(CONFIG_OBJ)
        # add periodic event to call pacer (period presumably in
        # milliseconds, per tornado's PeriodicCallback — verify config)
        PeriodicCallback(pacer.http_request, CONFIG_OBJ["Banker"]["Period"]).start()
    # main io loop. it will loop waiting for requests
    IOLoop.instance().start()
# run this module's agent when executed as a script
if __name__ == '__main__':
    tornado_bidder_run()
|
|
# coding: utf-8
from django.db import models
from django.utils.translation import ugettext_lazy as _
# http://www.unece.org/cefact/locode/service/location.html
COUNTRIES = (
('AF', _('Afghanistan')),
('AL', _('Albania')),
('DZ', _('Algeria')),
('AS', _('American Samoa')),
('AD', _('Andorra')),
('AO', _('Angola')),
('AI', _('Anguilla')),
('AQ', _('Antarctica')),
('AG', _('Antigua and Barbuda')),
('AR', _('Argentina')),
('AM', _('Armenia')),
('AW', _('Aruba')),
('AU', _('Australia')),
('AT', _('Austria')),
('AZ', _('Azerbaijan')),
('BS', _('Bahamas')),
('BH', _('Bahrain')),
('BD', _('Bangladesh')),
('BB', _('Barbados')),
('BY', _('Belarus')),
('BE', _('Belgium')),
('BZ', _('Belize')),
('BJ', _('Benin')),
('BM', _('Bermuda')),
('BT', _('Bhutan')),
('BO', _('Bolivia')),
('BQ', _('Bonaire, Sint Eustatius and Saba')),
('BA', _('Bosnia and Herzegovina')),
('BW', _('Botswana')),
('BR', _('Brazil')),
('IO', _('British Indian Ocean Territory')),
('BN', _('Brunei Darussalam')),
('BG', _('Bulgaria')),
('BF', _('Burkina Faso')),
('BI', _('Burundi')),
('KH', _('Cambodia')),
('CM', _('Cameroon')),
('CA', _('Canada')),
('CV', _('Cape Verde')),
('KY', _('Cayman Islands')),
('CF', _('Central African Republic')),
('TD', _('Chad')),
('CL', _('Chile')),
('CN', _('China')),
('CX', _('Christmas Island')),
('CC', _('Cocos (Keeling) Islands')),
('CO', _('Colombia')),
('KM', _('Comoros')),
('CG', _('Congo')),
('CD', _('Congo, The Democratic Republic of the')),
('CK', _('Cook Islands')),
('CR', _('Costa Rica')),
('CI', _('Ivory Coast')),
('HR', _('Croatia')),
('CU', _('Cuba')),
('CW', _('Curacao')),
('CY', _('Cyprus')),
('CZ', _('Czech Republic')),
('DK', _('Denmark')),
('DJ', _('Djibouti')),
('DM', _('Dominica')),
('DO', _('Dominican Republic')),
('EC', _('Ecuador')),
('EG', _('Egypt')),
('SV', _('El Salvador')),
('GQ', _('Equatorial Guinea')),
('ER', _('Eritrea')),
('EE', _('Estonia')),
('ET', _('Ethiopia')),
('FK', _('Falkland Islands (Malvinas)')),
('FO', _('Faroe Islands')),
('FJ', _('Fiji')),
('FI', _('Finland')),
('FR', _('France')),
('GF', _('French Guiana')),
('PF', _('French Polynesia')),
('TF', _('French Southern Territories')),
('GA', _('Gabon')),
('GM', _('Gambia')),
('GE', _('Georgia')),
('DE', _('Germany')),
('GH', _('Ghana')),
('GI', _('Gibraltar')),
('GR', _('Greece')),
('GL', _('Greenland')),
('GD', _('Grenada')),
('GP', _('Guadeloupe')),
('GU', _('Guam')),
('GT', _('Guatemala')),
('GG', _('Guernsey')),
('GN', _('Guinea')),
('GW', _('Guinea-Bissau')),
('GY', _('Guyana')),
('HT', _('Haiti')),
('HM', _('Heard Island and McDonald Islands')),
('VA', _('Holy See (Vatican City State)')),
('HN', _('Honduras')),
('HK', _('Hong Kong')),
('HU', _('Hungary')),
('IS', _('Iceland')),
('IN', _('India')),
('ID', _('Indonesia')),
('XZ', _('Installations in International Waters')),
('IR', _('Iran, Islamic Republic of')),
('IQ', _('Iraq')),
('IE', _('Ireland')),
('IM', _('Isle of Man')),
('IL', _('Israel')),
('IT', _('Italy')),
('JM', _('Jamaica')),
('JP', _('Japan')),
('JE', _('Jersey')),
('JO', _('Jordan')),
('KZ', _('Kazakhstan')),
('KE', _('Kenya')),
('KI', _('Kiribati')),
('KP', _('Korea, Democratic People\'s Republic of')),
('KR', _('Korea, Republic of')),
# see http://geonames.wordpress.com/2010/03/08/xk-country-code-for-kosovo/
('XK', _('Kosovo')),
('KW', _('Kuwait')),
('KG', _('Kyrgyzstan')),
('LA', _('Lao People\'s Democratic Republic')),
('LV', _('Latvia')),
('LB', _('Lebanon')),
('LS', _('Lesotho')),
('LR', _('Liberia')),
('LY', _('Libyan Arab Jamahiriya')),
('LI', _('Liechtenstein')),
('LT', _('Lithuania')),
('LU', _('Luxembourg')),
('MO', _('Macao')),
('MK', _('Macedonia, The former Yugoslav Republic of')),
('MG', _('Madagascar')),
('MW', _('Malawi')),
('MY', _('Malaysia')),
('MV', _('Maldives')),
('ML', _('Mali')),
('MT', _('Malta')),
('MH', _('Marshall Islands')),
('MQ', _('Martinique')),
('MR', _('Mauritania')),
('MU', _('Mauritius')),
('YT', _('Mayotte')),
('MX', _('Mexico')),
('FM', _('Micronesia, Federated States of')),
('MD', _('Moldova, Republic of')),
('MC', _('Monaco')),
('MN', _('Mongolia')),
('ME', _('Montenegro')),
('MS', _('Montserrat')),
('MA', _('Morocco')),
('MZ', _('Mozambique')),
('MM', _('Myanmar')),
('NA', _('Namibia')),
('NR', _('Nauru')),
('NP', _('Nepal')),
('NL', _('Netherlands')),
('NC', _('New Caledonia')),
('NZ', _('New Zealand')),
('NI', _('Nicaragua')),
('NE', _('Niger')),
('NG', _('Nigeria')),
('NU', _('Niue')),
('NF', _('Norfolk Island')),
('MP', _('Northern Mariana Islands')),
('NO', _('Norway')),
('OM', _('Oman')),
('PK', _('Pakistan')),
('PW', _('Palau')),
('PS', _('Palestinian Territory, Occupied')),
('PA', _('Panama')),
('PG', _('Papua New Guinea')),
('PY', _('Paraguay')),
('PE', _('Peru')),
('PH', _('Philippines')),
('PN', _('Pitcairn')),
('PL', _('Poland')),
('PT', _('Portugal')),
('PR', _('Puerto Rico')),
('QA', _('Qatar')),
('RE', _('Reunion')),
('RO', _('Romania')),
('RU', _('Russian Federation')),
('RW', _('Rwanda')),
('SH', _('Saint Helena')),
('KN', _('Saint Kitts and Nevis')),
('LC', _('Saint Lucia')),
('PM', _('Saint Pierre and Miquelon')),
('VC', _('Saint Vincent and the Grenadines')),
('WS', _('Samoa')),
('SM', _('San Marino')),
('ST', _('Sao Tome and Principe')),
('SA', _('Saudi Arabia')),
('SN', _('Senegal')),
('RS', _('Serbia')),
('SC', _('Seychelles')),
('SL', _('Sierra Leone')),
('SG', _('Singapore')),
('SX', _('Sint Maarten (Dutch Part)')),
('SK', _('Slovakia')),
('SI', _('Slovenia')),
('SB', _('Solomon Islands')),
('SO', _('Somalia')),
('ZA', _('South Africa')),
('GS', _('South Georgia and the South Sandwich Islands')),
('SS', _('South Sudan')),
('ES', _('Spain')),
('LK', _('Sri Lanka')),
('SD', _('Sudan')),
('SR', _('Suriname')),
('SJ', _('Svalbard and Jan Mayen')),
('SZ', _('Swaziland')),
('SE', _('Sweden')),
('CH', _('Switzerland')),
('SY', _('Syrian Arab Republic')),
('TW', _('Taiwan, Province of China')),
('TJ', _('Tajikistan')),
('TZ', _('Tanzania, United Republic of')),
('TH', _('Thailand')),
('TL', _('Timor-Leste')),
('TG', _('Togo')),
('TK', _('Tokelau')),
('TO', _('Tonga')),
('TT', _('Trinidad and Tobago')),
('TN', _('Tunisia')),
('TR', _('Turkey')),
('TM', _('Turkmenistan')),
('TC', _('Turks and Caicos Islands')),
('TV', _('Tuvalu')),
('UG', _('Uganda')),
('UA', _('Ukraine')),
('AE', _('United Arab Emirates')),
('GB', _('United Kingdom')),
('US', _('United States')),
('UM', _('United States Minor Outlying Islands')),
('UY', _('Uruguay')),
('UZ', _('Uzbekistan')),
('VU', _('Vanuatu')),
('VE', _('Venezuela')),
('VN', _('Viet Nam')),
('VG', _('Virgin Islands, British')),
('VI', _('Virgin Islands, U.S.')),
('WF', _('Wallis and Futuna')),
('EH', _('Western Sahara')),
('YE', _('Yemen')),
('ZM', _('Zambia')),
('ZW', _('Zimbabwe')),
('ZZ', _('Unknown or unspecified country')),
)
class CountryField(models.CharField):
    """A CharField holding an ISO 3166-1 alpha-2 country code, with the
    COUNTRIES tuple as its default choices."""
    def __init__(self, *args, **kwargs):
        # BUGFIX: the CharField keyword is 'max_length'; 'maxlength' was the
        # pre-1.0 Django spelling and raises TypeError on any modern Django
        # (this file already uses the Python-3-only super() form).
        # Two characters fit ISO alpha-2 codes.
        kwargs.setdefault('max_length', 2)
        kwargs.setdefault('choices', COUNTRIES)
        super().__init__(*args, **kwargs)
    def get_internal_type(self):
        # Serialize/migrate as a plain CharField.
        return "CharField"
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for stochastic graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.bayesflow.python.ops import stochastic_gradient_estimators
from tensorflow.contrib.bayesflow.python.ops import stochastic_tensor_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.distributions import normal
from tensorflow.python.platform import test
# Short aliases used throughout the tests below.
sge = stochastic_gradient_estimators
st = stochastic_tensor_impl
class StochasticTensorTest(test.TestCase):
  """Tests for st.StochasticTensor: construction, value types, and losses."""

  def testConstructionAndValue(self):
    # Covers the three ways the value type can be chosen: the default,
    # an explicit dist_value_type argument, and an enclosing
    # st.value_type() scope.
    with self.test_session() as sess:
      mu = [0.0, 0.1, 0.2]
      sigma = constant_op.constant([1.1, 1.2, 1.3])
      sigma2 = constant_op.constant([0.1, 0.2, 0.3])
      prior_default = st.StochasticTensor(
          normal.Normal(loc=mu, scale=sigma))
      self.assertTrue(isinstance(prior_default.value_type, st.SampleValue))
      prior_0 = st.StochasticTensor(
          normal.Normal(loc=mu, scale=sigma),
          dist_value_type=st.SampleValue())
      self.assertTrue(isinstance(prior_0.value_type, st.SampleValue))
      with st.value_type(st.SampleValue()):
        prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
        self.assertTrue(isinstance(prior.value_type, st.SampleValue))
        likelihood = st.StochasticTensor(
            normal.Normal(loc=prior, scale=sigma2))
        self.assertTrue(isinstance(likelihood.value_type, st.SampleValue))
      # Every StochasticTensor registers itself in this graph collection,
      # in construction order.
      coll = ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION)
      self.assertEqual(coll, [prior_default, prior_0, prior, likelihood])
      # Also works: tf.convert_to_tensor(prior)
      prior_default = array_ops.identity(prior_default)
      prior_0 = array_ops.identity(prior_0)
      prior = array_ops.identity(prior)
      likelihood = array_ops.identity(likelihood)
      # Mostly a smoke test for now...
      prior_0_val, prior_val, prior_default_val, _ = sess.run(
          [prior_0, prior, prior_default, likelihood])
      self.assertEqual(prior_0_val.shape, prior_val.shape)
      self.assertEqual(prior_default_val.shape, prior_val.shape)
      # These are different random samples from the same distribution,
      # so the values should differ.
      self.assertGreater(np.abs(prior_0_val - prior_val).sum(), 1e-6)
      self.assertGreater(np.abs(prior_default_val - prior_val).sum(), 1e-6)

  def testMeanValue(self):
    # Under MeanValue, the tensor's value IS the distribution mean.
    with self.test_session() as sess:
      mu = [0.0, -1.0, 1.0]
      sigma = constant_op.constant([1.1, 1.2, 1.3])
      with st.value_type(st.MeanValue()):
        prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
        self.assertTrue(isinstance(prior.value_type, st.MeanValue))
      prior_mean = prior.mean()
      prior_value = prior.value()
      prior_mean_val, prior_value_val = sess.run([prior_mean, prior_value])
      self.assertAllEqual(prior_mean_val, mu)
      self.assertAllEqual(prior_mean_val, prior_value_val)

  def testSampleValueScalar(self):
    # SampleValue() keeps the distribution's batch shape; SampleValue(n)
    # prepends a sample dimension of size n.
    with self.test_session() as sess:
      mu = [[0.0, -1.0, 1.0], [0.0, -1.0, 1.0]]
      sigma = constant_op.constant([[1.1, 1.2, 1.3], [1.1, 1.2, 1.3]])
      with st.value_type(st.SampleValue()):
        prior_single = st.StochasticTensor(
            normal.Normal(loc=mu, scale=sigma))
      prior_single_value = prior_single.value()
      self.assertEqual(prior_single_value.get_shape(), (2, 3))
      prior_single_value_val = sess.run([prior_single_value])[0]
      self.assertEqual(prior_single_value_val.shape, (2, 3))
      with st.value_type(st.SampleValue(1)):
        prior_single = st.StochasticTensor(
            normal.Normal(loc=mu, scale=sigma))
        self.assertTrue(isinstance(prior_single.value_type, st.SampleValue))
      prior_single_value = prior_single.value()
      self.assertEqual(prior_single_value.get_shape(), (1, 2, 3))
      prior_single_value_val = sess.run([prior_single_value])[0]
      self.assertEqual(prior_single_value_val.shape, (1, 2, 3))
      with st.value_type(st.SampleValue(2)):
        prior_double = st.StochasticTensor(
            normal.Normal(loc=mu, scale=sigma))
      prior_double_value = prior_double.value()
      self.assertEqual(prior_double_value.get_shape(), (2, 2, 3))
      prior_double_value_val = sess.run([prior_double_value])[0]
      self.assertEqual(prior_double_value_val.shape, (2, 2, 3))

  def testDistributionEntropy(self):
    # entropy() on the StochasticTensor must agree with entropy() on the
    # wrapped distribution and on a freshly built identical distribution.
    with self.test_session() as sess:
      mu = [0.0, -1.0, 1.0]
      sigma = constant_op.constant([1.1, 1.2, 1.3])
      with st.value_type(st.MeanValue()):
        prior = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
        entropy = prior.entropy()
        deep_entropy = prior.distribution.entropy()
        expected_deep_entropy = normal.Normal(
            loc=mu, scale=sigma).entropy()
        entropies = sess.run([entropy, deep_entropy, expected_deep_entropy])
        self.assertAllEqual(entropies[2], entropies[0])
        self.assertAllEqual(entropies[1], entropies[0])

  def testSurrogateLoss(self):
    # loss() should produce the score-function surrogate: log_prob times
    # the (baseline-adjusted) downstream loss.
    with self.test_session():
      mu = [[3.0, -4.0, 5.0], [6.0, -7.0, 8.0]]
      sigma = constant_op.constant(1.0)
      # With default
      with st.value_type(st.MeanValue(stop_gradient=True)):
        dt = st.StochasticTensor(normal.Normal(loc=mu, scale=sigma))
      loss = dt.loss([constant_op.constant(2.0)])
      self.assertTrue(loss is not None)
      self.assertAllClose(
          dt.distribution.log_prob(mu).eval() * 2.0, loss.eval())
      # With passed-in loss_fn.
      dt = st.StochasticTensor(
          normal.Normal(loc=mu, scale=sigma),
          dist_value_type=st.MeanValue(stop_gradient=True),
          loss_fn=sge.get_score_function_with_constant_baseline(
              baseline=constant_op.constant(8.0)))
      loss = dt.loss([constant_op.constant(2.0)])
      self.assertTrue(loss is not None)
      self.assertAllClose((dt.distribution.log_prob(mu) * (2.0 - 8.0)).eval(),
                          loss.eval())
class ValueTypeTest(test.TestCase):
  """Checks that st.value_type() scopes nest and unwind correctly."""

  def testValueType(self):
    outer_vt = st.MeanValue()
    middle_vt = st.SampleValue()
    inner_vt = st.SampleValue()
    with st.value_type(outer_vt):
      self.assertEqual(st.get_current_value_type(), outer_vt)
      with st.value_type(middle_vt):
        self.assertEqual(st.get_current_value_type(), middle_vt)
        with st.value_type(inner_vt):
          self.assertEqual(st.get_current_value_type(), inner_vt)
      # Leaving the nested scopes must restore the enclosing value type.
      self.assertEqual(st.get_current_value_type(), outer_vt)
    # Outside any scope, querying the current value type is an error.
    with self.assertRaisesRegexp(ValueError, "No value type currently set"):
      st.get_current_value_type()
class ObservedStochasticTensorTest(test.TestCase):
  """Tests for st.ObservedStochasticTensor (distributions pinned to a value)."""

  def testConstructionAndValue(self):
    # An observed tensor's value() is exactly the supplied observation.
    with self.test_session() as sess:
      loc = [0.0, 0.1, 0.2]
      scale = constant_op.constant([1.1, 1.2, 1.3])
      observation = array_ops.zeros((2, 3))
      observed = st.ObservedStochasticTensor(
          normal.Normal(loc=loc, scale=scale), value=observation)
      obs_out, val_out = sess.run([observation, observed.value()])
      self.assertAllEqual(obs_out, val_out)
      # It still registers itself in the stochastic-tensor collection.
      self.assertEqual(
          ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION), [observed])

  def testConstructionWithUnknownShapes(self):
    # Fully-unknown shapes are accepted.
    loc = array_ops.placeholder(dtypes.float32)
    scale = array_ops.placeholder(dtypes.float32)
    observation = array_ops.placeholder(dtypes.float32)
    first = st.ObservedStochasticTensor(
        normal.Normal(loc=loc, scale=scale), value=observation)
    # Partially-known, broadcast-compatible shapes are accepted too.
    loc2 = array_ops.placeholder(dtypes.float32, shape=[None])
    scale2 = array_ops.placeholder(dtypes.float32, shape=[None])
    observation2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
    second = st.ObservedStochasticTensor(
        normal.Normal(loc=loc2, scale=scale2), value=observation2)
    self.assertEqual(
        ops.get_collection(st.STOCHASTIC_TENSOR_COLLECTION), [first, second])

  def testConstructionErrors(self):
    loc = [0., 0.]
    scale = [1., 1.]
    # Observation shape incompatible with the distribution's batch shape.
    self.assertRaises(
        ValueError,
        st.ObservedStochasticTensor,
        normal.Normal(loc=loc, scale=scale),
        value=array_ops.zeros((3,)))
    # Incompatible even with an added trailing dimension.
    self.assertRaises(
        ValueError,
        st.ObservedStochasticTensor,
        normal.Normal(loc=loc, scale=scale),
        value=array_ops.zeros((3, 1)))
    # Wrong dtype for the observation.
    self.assertRaises(
        ValueError,
        st.ObservedStochasticTensor,
        normal.Normal(loc=loc, scale=scale),
        value=array_ops.zeros((1, 2), dtype=dtypes.int32))
# Run the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  test.main()
|
|
import ael
import acm
import HTI_FeedTrade_EDD_Util
import HTI_Util
import HTI_Email_Util
import os
import sys, traceback
# Column indices into a position row produced by get_ins_qty_and_avgprice()
# and consumed when writing the CSV lines in ael_main().
COL_PRODUCT_ID = 0
COL_INS_TYP = 1
COL_UND_INS = 2
COL_PORTFOLIO_ID = 3
COL_PORTFOLIO_CCY = 4
COL_WAY = 5          # 'B' (long/zero) or 'S' (short)
COL_CP = 6           # 'C' (call) or 'P' (put)
COL_QTY = 7
COL_PRICE = 8
COL_MKTV = 9
COL_NTRADE = 10      # new-trade P&L attribution
# Unrealized / realized / total P&L: daily, monthly, yearly.
COL_UPLD = 11
COL_UPLM = 12
COL_UPLY = 13
COL_RPLD = 14
COL_RPLM = 15
COL_RPLY = 16
COL_TPLD = 17
COL_TPLM = 18
COL_TPLY = 19
COL_DIVD = 20        # dividends
# ael dialog parameter definitions consumed by ael_main(); each entry is
# [name, label, type, choices, default, mandatory, multi, tooltip, hook, enabled].
ael_variables = [['posdate', 'Date', 'string', [str(ael.date_today()), 'Today'], 'Today', 1, 0, 'Position Date', None, 1], \
                 ['acquirers', 'Acquirer(s)', 'string', HTI_Util.getAllAcquirers(), 'HTIFS - EDD,HTISEC - EDD', 1, 1, 'Acquirer(s)', None, 1], \
                 ['pfs', 'Portfolio(s)', 'string', HTI_Util.getAllPortfolios(), 'EDD', 1, 1, 'Portfolio(s)', None, 1], \
                 ['fixed_ccy', 'Fixed Currency', 'string', HTI_Util.getAllCurrencies(), 'HKD', 0, 0, 'Fixed Currency', None, 1], \
                 ['fileName', 'File name', 'string', None, 'D:\\temp\\FAPos_to_HORIZON_YYYYMMDD.csv', 1, 0, 'File Name', None, 1], \
                 ['fileName_ccy', 'File name in fixed currency', 'string', None, 'D:\\temp\\FAPos_to_HORIZON_YYYYMMDD_CCY.csv', 1, 0, 'File Name in Fixed Currency', None, 1], \
                 ['success_email_subj', 'Success Email Subject', 'string', None, 'FA (PROD) : EDD MSS Trade File Upload - SUCCESS', 1, 0, 'Sucess Email Subject', None, 1], \
                 ['success_emaillist', 'Success Email List', 'string', None, 'frederick.yh.li@htisec.com', 1, 0, 'Success Email List', None, 1], \
                 ['successEmail', 'Send Success Email', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Send Success Email', None, 1], \
                 ['failure_email_subj', 'Failure Email Subject', 'string', None, 'FA (PROD) : EDD MSS Trade File Upload - FAILED', 1, 0, 'Failure Email Subject', None, 1], \
                 ['failure_emaillist', 'Failure Email List', 'string', None, 'frederick.yh.li@htisec.com', 1, 0, 'Failure Email List', None, 1], \
                 ['failureEmail', 'Send Failure Email', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Send Failure Email', None, 1], \
                 ['mic', 'MIC(s)', 'string', None, 'XHKG,XHKF', 1, 0, 'MIC(s)', None, 1], \
                 ['prod_type', 'Product Type(s)', 'string', HTI_Util.getAllInstypes(), 'Stock,Option,Future/Forward,Warrant', 1, 1, 'Product Type(s)', None, 1], \
                 ['fo_system', 'FO System(s)', 'string', HTI_Util.getAllFOSystems(), 'Horizon', 0, 1, 'FO System(s)', None, 1], \
                 ['horizon_context', 'Horizon Context', 'string', None, 'PT', 1, 0, 'Horizon Context', None, 1], \
                 ['otc', 'OTC', 'string', HTI_Util.get_yesno(), 'N', 0, 0, 'Y, for OTC only, N for Listed only, BLANK for All (both OTC and Listed)', None, 1]]
def ListedOrOtc(acmIns):
    """Classify an ACM instrument as 'OTC' or 'Listed' via its Otc() flag."""
    is_otc = acmIns.Otc() == True
    return 'OTC' if is_otc else 'Listed'
def get_trd_record(tf_id, trd_array, asofdate, ccy = ""):
    """Return *trd_array* for the trades selected by trade filter *tf_id*.

    NOTE(review): this looks unfinished -- the calculation space is created
    but never queried, and nothing is ever appended to *trd_array*; the
    input list is returned unchanged.  Confirm intended behaviour.
    """
    context = acm.GetDefaultContext()
    sheet_type = 'FOrderBookSheet'
    #create CalculationSpace (Virtual Trading Manager)
    calc_space = acm.Calculations().CreateCalculationSpace(context, sheet_type)
    # NOTE(review): bare attribute access, not a call -- likely a no-op.
    calc_space.SimulateValue
    tf = ael.TradeFilter[tf_id]
    #pfObj = acm.FPortfolio['EDD']
    #print pfObj
    #add item to portfolio sheet
    nacmTf = acm.FTradeSelection[tf.fltid]
    print nacmTf.Size()
    return trd_array
def get_ins_qty_and_avgprice(tf_id, pos_array, asofdate, ccy = ""):
    """Append one position row per instrument held by trade filter *tf_id*.

    Builds a virtual FPortfolioSheet, groups it by 'Trade Portfolio', and for
    each instrument row extracts position, average price, market value and
    the daily/monthly/yearly unrealized, realized and total P&L figures plus
    dividends.  Rows (lists of strings, indexed by the COL_* constants) are
    appended to *pos_array*, which is also returned.

    Parameters:
        tf_id     -- name of the ael.TradeFilter to load.
        pos_array -- list to append rows to (mutated in place).
        asofdate  -- 'Today' or an explicit valuation date string.
        ccy       -- '' for instrument/portfolio currency; otherwise the
                     fixed reporting currency to convert all figures into.
    """
    context = acm.GetDefaultContext()
    sheet_type = 'FPortfolioSheet'
    # Create a CalculationSpace (virtual Trading Manager sheet).
    calc_space = acm.Calculations().CreateCalculationSpace(context, sheet_type)
    # Simulate the sheet's currency settings.
    if ccy == "":
        calc_space.SimulateGlobalValue( 'Position Currency Choice', 'Instrument Curr')
        calc_space.SimulateGlobalValue( 'Aggregate Currency Choice', 'Portfolio Curr')
    else:
        calc_space.SimulateGlobalValue( 'Position Currency Choice', 'Fixed Curr')
        calc_space.SimulateGlobalValue( 'Aggregate Currency Choice', 'Fixed Curr')
        # Bug fixed: the fixed currency was hard-coded to 'HKD', silently
        # ignoring the caller-supplied *ccy* argument.
        calc_space.SimulateGlobalValue( 'Fixed Currency', ccy)
    calc_space.SimulateGlobalValue( 'Portfolio Profit Loss Start Date', 'Inception' )
    if asofdate == 'Today':
        calc_space.SimulateGlobalValue( 'Portfolio Profit Loss End Date Custom', 'Now' )
    else:
        calc_space.SimulateGlobalValue( 'Portfolio Profit Loss End Date', 'Custom Date' )
        calc_space.SimulateGlobalValue( 'Portfolio Profit Loss End Date Custom', asofdate )
        calc_space.SimulateGlobalValue( 'Valuation Date', asofdate )
    # NOTE(review): bare attribute access, not a call -- likely a no-op.
    calc_space.SimulateValue
    tf = ael.TradeFilter[tf_id]
    # Insert the trade selection into the sheet and group it by portfolio.
    nacmTf = acm.FTradeSelection[tf.fltid]
    top_node = calc_space.InsertItem(nacmTf)
    groupers = [acm.Risk().GetGrouperFromName('Trade Portfolio')]
    chained_grouper = acm.FChainedGrouper(groupers)
    top_node.ApplyGrouper(chained_grouper)
    calc_space.Refresh()
    # Walk: trade-filter node -> portfolio nodes -> instrument rows.
    tf_iter = calc_space.RowTreeIterator().FirstChild()
    pf_iter = tf_iter.FirstChild()
    while pf_iter:
        row = pf_iter.Tree().Item()
        prfid = str(row).replace("'", "")
        for ins_row in row.Children():
            insid = ins_row.StringKey()
            acm_ins = acm.FInstrument[insid]
            # Build the Horizon product id '<local code>@<MIC>' from add-infos.
            localcode = ''
            if acm_ins.AdditionalInfo().Local_Exchange_Code() != None:
                if acm_ins.AdditionalInfo().Local_Exchange_Code().strip() != '':
                    localcode = acm_ins.AdditionalInfo().Local_Exchange_Code().strip()
            insMic = ''
            if acm_ins.AdditionalInfo().MIC() != None:
                if acm_ins.AdditionalInfo().MIC().strip() != '':
                    insMic = acm_ins.AdditionalInfo().MIC().strip()
            horizon_prod_id = localcode + '@' + insMic
            # Pull the per-instrument figures off the sheet.
            prf_ccy = calc_space.CreateCalculation(ins_row, 'Portfolio Currency').FormattedValue()
            ins_typ = ListedOrOtc(acm_ins) + ' ' + calc_space.CreateCalculation(ins_row, 'Instrument Type').FormattedValue()
            und_ins = calc_space.CalculateValue(ins_row, 'Underlying Instrument')
            und_ins = insid if str(und_ins) == "" else und_ins
            pos = float(calc_space.CalculateValue(ins_row, 'Portfolio Position'))
            cp = 'C' if calc_space.CalculateValue(ins_row, 'Call or Put') == "Call" else 'P'
            avg_price = float(calc_space.CalculateValue(ins_row, 'Portfolio Average Price'))
            mktv = float(calc_space.CalculateValue(ins_row, 'Portfolio PL Market Value'))
            # Formatted values come back with thousand separators; strip them.
            nTrade = calc_space.CreateCalculation(ins_row, 'Portfolio Theoretical Profit And Loss Trade Attribution').FormattedValue().replace(',','')
            upld = calc_space.CreateCalculation(ins_row, 'Portfolio Unrealized Profit and Loss Daily').FormattedValue().replace(',','')
            uplm = calc_space.CreateCalculation(ins_row, 'Portfolio Unrealized Profit and Loss Monthly').FormattedValue().replace(',','')
            uply = calc_space.CreateCalculation(ins_row, 'Portfolio Unrealized Profit and Loss Yearly').FormattedValue().replace(',','')
            rpld = calc_space.CreateCalculation(ins_row, 'Portfolio Realized Profit and Loss Daily').FormattedValue().replace(',','')
            rplm = calc_space.CreateCalculation(ins_row, 'Portfolio Realized Profit and Loss Monthly').FormattedValue().replace(',','')
            rply = calc_space.CreateCalculation(ins_row, 'Portfolio Realized Profit and Loss Yearly').FormattedValue().replace(',','')
            tpld = calc_space.CreateCalculation(ins_row, 'Portfolio Total Profit and Loss Daily').FormattedValue().replace(',','')
            tplm = calc_space.CreateCalculation(ins_row, 'Portfolio Total Profit and Loss Monthly').FormattedValue().replace(',','')
            tply = calc_space.CreateCalculation(ins_row, 'Portfolio Total Profit and Loss Yearly').FormattedValue().replace(',','')
            divd = calc_space.CreateCalculation(ins_row, 'Portfolio Dividends').FormattedValue().replace(',','')
            # Long (or flat) positions are reported as buys, shorts as sells.
            if float(pos) >= 0:
                bs = 'B'
            else:
                bs = 'S'
            # Fall back to the FA instrument id when both add-infos were blank.
            if horizon_prod_id == '@':
                horizon_prod_id = insid
            pos_row = [insid, ins_typ, und_ins, prfid, prf_ccy, bs, cp, str(pos), str(avg_price), str(mktv), str(0 if nTrade == "" else float(nTrade))
                       , str(0 if upld == "" else float(upld)), str(0 if uplm == "" else float(uplm)), str(0 if uply == "" else float(uply))
                       , str(0 if rpld == "" else float(rpld)), str(0 if rplm == "" else float(rplm)), str(0 if rply == "" else float(rply))
                       , str(0 if tpld == "" else float(tpld)), str(0 if tplm == "" else float(tplm)), str(0 if tply == "" else float(tply))
                       , str(0 if divd == "" else float(divd))
                       ]
            pos_array.append(pos_row)
        pf_iter = pf_iter.NextSibling()
    calc_space.Clear()
    return pos_array
def sortArray(x, y):
    """cmp-style comparator: lexicographically compare sequences x and y.

    Returns 1 if x sorts after y, -1 if before, 0 if equal over the
    compared prefix.  Used as the cmp argument to list.sort() (Python 2).

    Bug fixed: the original returned -1 whenever x[i] was not strictly
    greater than y[i], so equal elements never advanced to the next
    position and cmp(a, a) was -1 -- an inconsistent comparator that made
    the sort order of equal rows undefined.
    """
    for a, b in zip(x, y):
        if a > b:
            return 1
        if a < b:
            return -1
    return 0
def ael_main(dict):
    """ael entry point: export FA positions to Horizon CSV files.

    Reads the dialog parameters (see ael_variables), rebuilds the SQL text
    object backing the TF_EDD_TRADE trade filter, collects positions, writes
    two CSV files (native currency and fixed currency), and emails the
    outcome.

    NOTE(review): the parameter shadows the builtin ``dict`` (Python 2
    script; kept as-is).
    """
    ret = False
    # Email recipients and subjects.
    FAILURE_EMAILLIST = dict['failure_emaillist']
    print 'Failure Email List:', FAILURE_EMAILLIST
    FAILURE_RECIPIENTS = FAILURE_EMAILLIST.split(',')
    SUCCESS_EMAILLIST = dict['success_emaillist']
    print 'Success Email List:', SUCCESS_EMAILLIST
    SUCCESS_RECIPIENTS = SUCCESS_EMAILLIST.split(',')
    successSubject = dict['success_email_subj']
    errSubject = dict['failure_email_subj']
    send_failureEmail = dict['failureEmail']
    send_successEmail = dict['successEmail']
    horizon_context = dict['horizon_context']
    # Position date: 'Today' or an explicit date string.
    asofdate = dict['posdate']
    if asofdate == 'Today':
        posdate = ael.date_today()
    else:
        posdate = ael.date[asofdate]
    # Substitute the run date into the output file names.
    fileName = dict['fileName']
    fileName_ccy = dict['fileName_ccy']
    fileName = fileName.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
    fileName_ccy = fileName_ccy.replace("YYYYMMDD", posdate.to_string('%Y%m%d'))
    # Acquirers -> quoted, comma-separated SQL list.
    acq_array_list = dict['acquirers']
    acq_list = ''
    for acq in acq_array_list:
        if acq_list == '':
            acq_list = "'" + acq + "'"
        else:
            acq_list = acq_list + ",'" + acq + "'"
    # FO Systems -> quoted list.
    fo_system_list = dict['fo_system']
    fosys_list = ''
    for fosys in fo_system_list:
        if fosys_list == '':
            fosys_list = "'" + fosys + "'"
        else:
            fosys_list = fosys_list + ",'" + fosys+ "'"
    # MICs -> quoted list.
    mics = dict['mic']
    mic_list = mics.split(',')
    mic_code_list = ''
    for mic_code in mic_list:
        #print mic_code
        if mic_code_list == '':
            mic_code_list = "'" + mic_code + "'"
        else:
            mic_code_list = mic_code_list + ",'" + mic_code+ "'"
    # Product Types -> quoted list.
    prod_type_list = dict['prod_type']
    ptype_list = ''
    for ptype in prod_type_list:
        if ptype_list == '':
            ptype_list = "'" + ptype + "'"
        else:
            ptype_list = ptype_list + ",'" + ptype+ "'"
    # Normalise the OTC flag to 'Yes'/'No'.
    otc = dict['otc']
    if otc == 'Y':
        otc = 'Yes'
    else:
        otc = 'No'
    fixed_ccy = dict['fixed_ccy']
    # Portfolios: expand each selected portfolio into its child portfolios.
    portfolios = dict['pfs']
    portfolioList2 = []
    pf_list = ''
    portfolioList2.extend(portfolios)
    for port in portfolioList2:
        prfid = port
        pfarr = []
        pPf = ael.Portfolio[prfid]
        HTI_FeedTrade_EDD_Util.getChildPortfolio(pPf, pfarr)
        if len(pfarr) > 0:
            for pf in pfarr:
                if len(pf_list) != 0:
                    pf_list = pf_list + ','
                pf_list = pf_list + "'" + pf + "'"
        else:
            if len(pf_list) != 0:
                pf_list = pf_list + ','
            pf_list = pf_list + "'" + prfid + "'"
    # Query template; @placeholders are substituted below.
    strSql = """
            select t.trdnbr
            from instrument i, trade t, party acq, portfolio pf
            where i.insaddr = t.insaddr
            and t.status not in ('Void', 'Simulated')
            @otc_str
            and t.acquirer_ptynbr = acq.ptynbr
            and t.prfnbr = pf.prfnbr
            and acq.ptyid in (@accquirer_list)
            and (i.exp_day >= '@dt' or i.exp_day = '0000-01-01')
            and i.instype in (@ptype_list)
            and pf.prfid in (@portfolio_list)
            """
    # NOTE(review): otc was just normalised to 'Yes'/'No' above, so the
    # None branch is unreachable.
    if otc == None:
        strSql = strSql.replace("@otc_str", "and i.otc = '@otc'")
        strSql = strSql.replace('@otc', otc)
    elif otc == 'Yes':
        strSql = strSql.replace("@otc_str", "and i.otc = '@otc'")
        strSql = strSql.replace('@otc', otc)
    else:
        strSql = strSql.replace("@otc_str", " ")
    # NOTE(review): fo_system_list is a list, so the == '' comparison is
    # always False; the template contains no '@fosys' placeholder anyway.
    if fo_system_list == '':
        strSql = strSql.replace("@fosys", "and as.value in (@fosys_list)")
    else:
        strSql = strSql.replace("@fosys", " ")
    strSql = strSql.replace('@accquirer_list', acq_list)
    strSql = strSql.replace('@portfolio_list', pf_list)
    strSql = strSql.replace('@dt', posdate.to_string('%Y-%m-%d'))
    strSql = strSql.replace('@fosys_list', fosys_list)
    # NOTE(review): no '@mic_code_list' placeholder exists in the template,
    # so the MIC filter is effectively unused.
    strSql = strSql.replace('@mic_code_list', mic_code_list)
    strSql = strSql.replace('@ptype_list', ptype_list)
    #print 'strSql', strSql
    # Persist the query into the text object backing the trade filter.
    tobject = ael.TextObject.read('type="SQL Query" and name="%s"' % ('tf_edd_trade_qry'))
    tobject_c = tobject.clone()
    tobject_c.set_text(strSql)
    tobject_c.commit()
    ael.poll()
    trd_array = []
    pos_array = []
    pos_array_ccy = []
    trd_array = get_trd_record('TF_EDD_TRADE', trd_array, asofdate)
    #pos_array = get_ins_qty_and_avgprice('TF_EDD_TRADE', pos_array, asofdate)
    #pos_array_ccy = get_ins_qty_and_avgprice('TF_EDD_TRADE', pos_array_ccy, asofdate, fixed_ccy)
    pos_array.sort(sortArray)
    pos_array_ccy.sort(sortArray)
    header = "Instrument,Reporting Date,Instrument Type,Underlying,Portfolio,Currency,Position,Call Put,Quantity,Average Price,Market Value,New Trade,UPLD,UPLM,UPLY,RPLD,RPLM,RPLY,TPLD,TPLM,TPLY,Dividends\n"
    # Write both CSVs; the finally block then emails success or failure.
    # NOTE(review): files are closed inside try, not in finally, so an
    # exception mid-write leaks the handle; ret stays False -> failure mail.
    try:
        f1 = open(fileName, "w")
        f1.write(header)
        for pos_row in pos_array:
            #horizon_pf = HTI_FeedTrade_EDD_Util.getHorizonPfbyFAPf(pos_row[COL_PORTFOLIO_ID])
            #if horizon_pf == '':
            #horizon_pf = pos_row[COL_PORTFOLIO_ID]
            line = pos_row[COL_PRODUCT_ID] + ',' + posdate.to_string('%Y-%m-%d') + ',' + str(pos_row[COL_INS_TYP]) + ',' + str(pos_row[COL_UND_INS]) + ',' + pos_row[COL_PORTFOLIO_ID] + ','
            line = line + str(pos_row[COL_PORTFOLIO_CCY]) + ',' + pos_row[COL_WAY] + ',' + pos_row[COL_CP] + ',' + str(abs(float(pos_row[COL_QTY]))) + ',' + str(pos_row[COL_PRICE]) + ',' + str(pos_row[COL_MKTV]) + ',' + str(pos_row[COL_NTRADE]) + ','
            line = line + str(pos_row[COL_UPLD]) + ',' + str(pos_row[COL_UPLM]) + ',' + str(pos_row[COL_UPLY]) + ','
            line = line + str(pos_row[COL_RPLD]) + ',' + str(pos_row[COL_RPLM]) + ',' + str(pos_row[COL_RPLY]) + ','
            line = line + str(pos_row[COL_TPLD]) + ',' + str(pos_row[COL_TPLM]) + ',' + str(pos_row[COL_TPLY]) + ','
            line = line + str(pos_row[COL_DIVD]) + ','
            line = line + '\n'
            f1.write(line)
        f1.close()
        f_ccy = open(fileName_ccy, "w")
        f_ccy.write(header)
        for pos_row in pos_array_ccy:
            #horizon_pf = HTI_FeedTrade_EDD_Util.getHorizonPfbyFAPf(pos_row[COL_PORTFOLIO_ID])
            #if horizon_pf == '':
            #horizon_pf = pos_row[COL_PORTFOLIO_ID]
            line = pos_row[COL_PRODUCT_ID] + ',' + posdate.to_string('%Y-%m-%d') + ',' + str(pos_row[COL_INS_TYP]) + ',' + str(pos_row[COL_UND_INS]) + ',' + pos_row[COL_PORTFOLIO_ID] + ','
            line = line + str(pos_row[COL_PORTFOLIO_CCY]) + ',' + pos_row[COL_WAY] + ',' + pos_row[COL_CP] + ',' + str(abs(float(pos_row[COL_QTY]))) + ',' + str(pos_row[COL_PRICE]) + ',' + str(pos_row[COL_MKTV]) + ',' + str(pos_row[COL_NTRADE]) + ','
            line = line + str(pos_row[COL_UPLD]) + ',' + str(pos_row[COL_UPLM]) + ',' + str(pos_row[COL_UPLY]) + ','
            line = line + str(pos_row[COL_RPLD]) + ',' + str(pos_row[COL_RPLM]) + ',' + str(pos_row[COL_RPLY]) + ','
            line = line + str(pos_row[COL_TPLD]) + ',' + str(pos_row[COL_TPLM]) + ',' + str(pos_row[COL_TPLY]) + ','
            line = line + str(pos_row[COL_DIVD]) + ','
            line = line + '\n'
            f_ccy.write(line)
        f_ccy.close()
        ret = True
    finally:
        # Email (or just print) the outcome with the native-currency file
        # attached.
        email_content = 'Date: %s' % posdate.to_string('%Y-%m-%d') + '\n'
        attached_filename = os.path.basename(fileName)
        attached_filedir = os.path.dirname(fileName) + "\\"
        if ret:
            if send_successEmail == 'Y':
                HTI_Email_Util.SendAttachment(SUCCESS_RECIPIENTS, successSubject, email_content, [attached_filedir], [attached_filename], True)
            else:
                print successSubject
                print SUCCESS_RECIPIENTS
                print email_content
                print fileName
        else:
            if send_failureEmail == 'Y':
                HTI_Email_Util.SendAttachment(FAILURE_RECIPIENTS, errSubject, email_content, [attached_filedir], [attached_filename], True)
            else:
                print errSubject
                print FAILURE_RECIPIENTS
                print email_content
                print fileName
|
|
#-*- coding:utf-8-*-
#author:cd
import wx
import pyHook
import pythoncom
import time
import win32api
import win32con
import threading
import pickle
from wx.lib.embeddedimage import PyEmbeddedImage
from wx.lib.wordwrap import wordwrap
# Tray icon bitmap (32x32 PNG, base64-embedded via wx.lib.embeddedimage).
figurefree = PyEmbeddedImage(
    "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAAA3NCSVQICAjb4U/gAAAC0UlE"
    "QVRIib2WPSxrYRjHf62qhJS2yiA+alBJDQY2kZgMEpsIiTAhjdEmBlPHLtI2JhIJNotVIjr6"
    "FpNIK0iapsRXqGpxh8f73oN7ucnN6Tuc9Jw8p8/ze5/n/f+P5fn5GbBYLMDLywtQUlICrKys"
    "AEtLS0BxcTHQ1tYGTE1NAa+vr4DVauWn9XPEfy5bOp0GioqKUARVVVVALBYDZmdngYaGBmBs"
    "bAw4ODgAWltb+TcO8wlqamr0jRAIjXAkEgkUQUtLC3B6eooieHt7++ZaKALJJlNkzOz1ejVB"
    "V1cXcHd3B5SVlekY47ty/brMJ/hb5tvbW6C2tlY/SSaTQHNzs34i3ZI5PD4+BuLxOJBKpYD7"
    "+3sKQfDhxmZDTff29jYwMjKC6sTT05OubnFxETVR0onq6mqgrq4O8Pl8gMvlKiCBKJLdbgci"
    "kQjQ1NQEOJ1OIBwOA4eHh8D6+jrQ3t4OjI+PA5WVld8kMJ9Adlxql3nY3NwE5ufndZDf70fR"
    "TExMABcXF8DDwwOqB7Lj+XweeHx8BK6urgpBYJH8R0dHKO3s7OwEhoaGdNDZ2RmqNx6PR9PI"
    "qZZ6d3Z2UOdfOHK5XCEIbJI5GAyinKunp+dTkFQkJ7m3txfo6OjQNcpbu7u7wMDAAFBfX4/q"
    "jfkEk5OTKLcSlS8tLf0UtL+/D/T39wOrq6t8VKq9vT3U+TfW/u53phNMT0+jJlo8S3ZWlihi"
    "d3c3sLa2BgQCARRlNpvVV6PKijOKsplPIFUvLy/r2jOZDGo2pFKjS4sWjY6OolxM4t1ut/5T"
    "o8eYTzAzMwOcnJwAjY2NqB3s6+vjo0vLb4fDASwsLOhKhSkUCv2u2vClZD7B3NwcyptEiy4v"
    "L1HzMzw8rEOlHxUVFcDg4CBwfX0NbGxsAOXl5X9MYL6aynl71w3D3kWjUWBrawul9Tc3N8D5"
    "+TnKgeU8i29Lz75+rZpO8AsCJE578O/FcgAAAABJRU5ErkJggg==")
# Icon shown in the About dialog.
TheCrackOfDawn = PyEmbeddedImage(
    "iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAIAAAD8GO2jAAAAA3NCSVQICAjb4U/gAAAGAElE"
    "QVRIiXVWu44lWRGMiMw8VXX7NQ8WBxuPT0DiQ8HgE/YzkLAwQLhYOGjZnZmd7nurTgZGdQ8S"
    "EscqnarKjIxIZQaR38EEQAY4YJJJqF2gibQOS4DAAgAQEGkz5Av8einSJMQGBbhN7d1I6oEM"
    "ihMMrQ0BCWTwOknEIm9uS3FIsMgk2TA5GqRIpM2GQFASAAAGOGmmcpUF0ZSRVKDLzM4FyEZC"
    "SQEMEGQCRQ7B4BADMEkyGrIIkqYB0vCglc0Lg9iHBiaDAMZiBBDmIEkvEM1gkqa42INUI6wB"
    "tpBkSQTY3VHZ4ARskkjEpU2Oi3WYYSVYUMlJFCRjUEklKUNmkQNGaFCbQTLgNI2UpG6YIFSR"
    "AJJxgTG1gUhtRjSLqhbE1QC1SnVMKAY4mNEIScaAygCigARMSaGGaWZENwCk4x5KYg0JWB1J"
    "LnBkDrsUIQyIoxanyAJDDMYZXSZVOW1CIYGn7AApAkYynxxBbBMWLqphFpGqDRgmlWWSMVoF"
    "igoTjLKGEAQhhU6FAQlBvqYggLQ+MMteWWGujnRkxnAW8ComYiDpXEkiJNIgY1BxxhGoYgOQ"
    "KBAwEBEAUus7aGSsjtValGVRIWSBI3MxgSgpZoFGRlLRoQ5RggEFIKeiTOUOpwKcBkgmLx+t"
    "wViQ1QrkkhJCXWUlYwQxI8jIFCVKorkExUhBJNUkk0wDWhNiKINtKdLbh6jFoMbiCEYxwqmK"
    "ZCp0QkuSGkmKGZnqM9k4x4pIMRAFUipbUnZbIrPu7xRDDOYYY0DkEnOUQipV0rQrsiJHSkJF"
    "rpOZDLLC8So7yixKWqIpUtNM48h4vEMwM5UDlRzRCytHBliZS0YJQ5XkwkhpyajJzBSjWsUU"
    "bexSVSzEqlyDSQvMrHxYGsvCdXAZymSF1tBCj4jKWhQVHFhHVLFWpLhVhvIyVPSoTHHN+IKp"
    "0IfUfeASWoNlh47Mba9q140Zy7rkOupSy0oOjDGS1tB2qaX0NMaS+/t1fSgtg4vnmszFq7iJ"
    "txxfbrdfrLobc0sP9UIuifwY/6KRTu09GsuBdc/Lc4zLcrlft8q7zm3PZZd6vF/z/uXzHXQB"
    "L4FLjOi5UoOFmT002e++7is7o9XHvN1y+/q3Grk4Hu+2uzGW5NNaTxWX/TY++emybQcuNxVV"
    "n3aqkxqVFR7aY3/p5x+fjy8/HftA9jyu6M93tSWjGJelljXf//svygywj3l1O3r2fA49POjj"
    "ZXt897A9rk/3l7v7u7zT08eH+8tlLD22qu2CbeOx4Xqb1+v8+uOxv+ja0j73G6L57J9/Rv72"
    "Nx8/fPf07vHu8bJsI7YlthGrVHm7LLmXE/P4+nmM41p7xPWFvOkINz89zx+vtRjHz/N2Sz4r"
    "bp24XV/i5fY4fvmPr49//MNf89c//P3pNh4+r3fva7lkrMtYq0bVirIenAzmJsyug9B03qA5"
    "+3ZDcwI/3J6f9+N57vvt5eW6X+c/v+j7P/30++///DzF5Z6/+9V3to/9ZVTM40hqVH3dbz99"
    "3pm8drcg+CHrOv2yz8P8dLv19BU4IKEJATkxD8C0ssBV3A+XsVPjYjhIHC3SgAFJuyRPzQNK"
    "gQSmb22SgTbwOpAbbhgEYDFsBdWchbwZAAKRpwcA7LcnAM0Dplq2u9s2HAS/fQAANCUgiADU"
    "p+fIgnL2BCgp2UcDBOjXOX7+yumGBZ15GzZNBtsmAdomAh2mAfAceqS7mQkes10RPNfC/znq"
    "+cYFOdNE2HqDwLf90gBsixlRc7btCAF9HEf+v9j2hMA4Pd0rRPtbaODcWcZJLBFzzvOyu2fv"
    "bgKIb1bsfw4ZYFgBklKfKioiwrbtiLC7PeGwDXDb1tk7TAN2G03ipKhPQG343LESHYAatvqU"
    "y6bPEv4rNElmFgC0pxsCJija7nlIwYhoNyVAfTo7EsDJNUkS9ongW60USSqzjuPobgJm8+zy"
    "15Y7GwyMCCIMQjJhv1Lcb+EInMAlvZGTZ9pTG3f3KwK4G3x1wN1NMgGedcuwQQOw+crktxxn"
    "YSRJzbm/yvz2nqJBHPPVxMMkBYT9H3h01iCgfKKmAAAAAElFTkSuQmCC")
# Load persisted settings; fall back to defaults on first run or a bad file.
# Scan codes 59/60 are the keys mapped to left/right click; 'hold' selects
# press-and-hold simulation (see OnKeyDown/OnKeyUp) vs. single clicks.
# NOTE(review): 'config' is opened in text mode 'r'; pickle normally wants 'rb'.
try:
    GLOBAL_SETTINGS = pickle.load(open('config', 'r'))
except Exception, err:
    GLOBAL_SETTINGS = {'leftclick':59, 'rightclick':60, 'hold':True}
# Text controls awaiting key capture (see OnSet) and currently-held scan codes.
ENTRIES = []
DOWNKEYS = set()
#LOCK = threading.Lock()
def OnHook(event):
    """Keyboard hook for click mode: emit a full mouse click per key press.

    Returns False to swallow the key event when it was translated into a
    mouse action, True to let it pass through to the system.
    """
    code = event.ScanCode
    if code == GLOBAL_SETTINGS['leftclick']:
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
        time.sleep(0.05)
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
        return False
    if code == GLOBAL_SETTINGS['rightclick']:
        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTDOWN, 0, 0)
        time.sleep(0.05)
        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0)
        return False
    if code == 55:
        # Scan code 55: press the left button without releasing it.
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
        return False
    return True
def OnKeyUp(event):
    """Hold-mode key release: lift the corresponding simulated mouse button.

    Returns False (swallow the event) for keys we pressed in OnKeyDown,
    True otherwise.
    """
    code = event.ScanCode
    if code not in DOWNKEYS:
        return True
    if code == GLOBAL_SETTINGS['leftclick']:
        win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
    elif code == GLOBAL_SETTINGS['rightclick']:
        win32api.mouse_event(win32con.MOUSEEVENTF_RIGHTUP, 0, 0)
    DOWNKEYS.remove(code)
    return False
def OnKeyDown(event):
    """Hold-mode key press: push the mapped mouse button down (once).

    DOWNKEYS suppresses auto-repeat so the button-down event fires only on
    the first press.  Returns False to swallow mapped keys, True otherwise.
    """
    code = event.ScanCode
    button_down = None
    if code == GLOBAL_SETTINGS['leftclick']:
        button_down = win32con.MOUSEEVENTF_LEFTDOWN
    elif code == GLOBAL_SETTINGS['rightclick']:
        button_down = win32con.MOUSEEVENTF_RIGHTDOWN
    if button_down is None:
        return True
    #LOCK.acquire()
    if code not in DOWNKEYS:
        win32api.mouse_event(button_down, 0, 0)
        DOWNKEYS.add(code)
    #LOCK.release()
    return False
def OnSet(event):
    """Settings-dialog hook: capture a key into the highlighted entry.

    Pops the active text control off ENTRIES, writes the pressed key's scan
    code into it, re-queues it at the front, and highlights the next entry.
    Always returns False so the keystroke never reaches other apps.
    """
    captured = ENTRIES.pop()
    captured.SetValue(str(event.ScanCode))
    captured.SetBackgroundColour('#f0f0f0')
    captured.Refresh()
    ENTRIES.insert(0, captured)
    # Highlight the entry that will receive the next key press.
    ENTRIES[-1].SetBackgroundColour('green')
    ENTRIES[-1].Refresh()
    return False
def hook(callback):
    """Run a keyboard hook until this thread's message pump is quit.

    *callback* is either a single KeyDown handler, or a [KeyDown, KeyUp]
    pair for hold mode.  Blocks in PumpMessages() until WM_QUIT is posted
    to the thread (see TaskBarIcon.OnStop / SettingDialog.Stop).
    """
    manager = pyHook.HookManager()
    if isinstance(callback, list):
        manager.KeyDown = callback[0]
        manager.KeyUp = callback[1]
    else:
        manager.KeyDown = callback
    manager.HookKeyboard()
    pythoncom.PumpMessages()
    manager.UnhookKeyboard()
class TaskBarIcon(wx.TaskBarIcon):
    """System-tray icon driving the key-to-mouse remapper.

    Owns a daemon thread running the keyboard hook (see hook()); the tray
    popup menu starts/stops that thread, opens the settings dialog, shows
    the about box, or exits.
    """
    # wx menu-item ids for the popup menu.
    ID_About = wx.NewId()
    ID_Exit = wx.NewId()
    ID_Stop = wx.NewId()
    ID_Start = wx.NewId()
    ID_Setting = wx.NewId()
    def __init__(self):
        wx.TaskBarIcon.__init__(self)
        # NOTE(review): tooltip text 'figuerfreer' looks like a typo for
        # 'figure freer' (runtime string, left unchanged here).
        self.SetIcon(figurefree.getIcon(), 'figuerfreer')
        self.Bind(wx.EVT_MENU, self.OnAbout, id=self.ID_About)
        self.Bind(wx.EVT_MENU, self.OnExit, id=self.ID_Exit)
        self.Bind(wx.EVT_MENU, self.OnStop, id=self.ID_Stop)
        self.Bind(wx.EVT_MENU, self.OnStart, id=self.ID_Start)
        self.Bind(wx.EVT_MENU, self.OnSetting, id=self.ID_Setting)
        # Begin hooking immediately on startup.
        self.OnStart(None)
    def OnStop(self, event):
        # Post WM_QUIT (0x12) to the hook thread so PumpMessages() returns
        # and the thread exits.
        win32api.PostThreadMessage(self.hook.ident, 0x12)
    def OnStart(self, event):
        # 'hold' mode simulates press-and-hold via KeyDown/KeyUp handlers;
        # otherwise a single handler emits complete clicks per press.
        if GLOBAL_SETTINGS['hold']:
            self.hook = threading.Thread(target=hook, args=([OnKeyDown, OnKeyUp],))
        else:
            self.hook = threading.Thread(target=hook, args=(OnHook,))
        self.hook.daemon = True
        self.hook.start()
    def OnAbout(self, event):
        # First we create and fill the info object
        painter = wx.Frame(None)
        info = wx.AboutDialogInfo()
        info.SetIcon(TheCrackOfDawn.getIcon())
        info.Name = "figure freer"
        info.Version = "1.0"
        info.Copyright = "(c) cd"
        info.Description = wordwrap(
            "you can perform the mouse clicking action by any key you want."
            "For now, it meets my need perfectly. So no more time will be paid."
            "If you have any need, you are free to change or ever rewrite it.",
            350, wx.ClientDC(painter))
        info.WebSite = ("https://github.com/thecrackofdawn/figure-freer", "figure freer's source code")
        info.Developers = ["weibo : TheCrackOfDawn",
                           "hoping more and more attention ^_^"]
        licenseText = "MIT. You are free to do whatever you want."
        info.License = wordwrap(licenseText, 500, wx.ClientDC(painter))
        # Then we call wx.AboutBox giving it that info object
        wx.AboutBox(info)
        painter.Destroy()
    def OnSetting(self, event):
        # Pause our hook while the dialog captures keys with its own hook,
        # then resume it afterwards if it was running.
        alive = False
        if self.hook.is_alive():
            alive = True
            self.OnStop(None)
        dialog = SettingDialog()
        result = dialog.ShowModal()
        if result == wx.ID_OK:
            # Persist the newly captured scan codes.
            GLOBAL_SETTINGS['leftclick'] = int(dialog.leftClick.GetValue())
            GLOBAL_SETTINGS['rightclick'] = int(dialog.rightClick.GetValue())
            pickle.dump(GLOBAL_SETTINGS, open('config', 'w'))
        dialog.Stop()
        dialog.Destroy()
        if alive:
            self.OnStart(None)
    def OnExit(self, event):
        self.RemoveIcon()
        self.Destroy()
    def CreatePopupMenu(self):
        # Built on demand each right-click; offer start OR stop depending
        # on whether the hook thread is currently running.
        menu = wx.Menu()
        menu.Append(self.ID_About, 'about')
        if self.hook.is_alive():
            menu.Append(self.ID_Stop, 'stop')
        else:
            menu.Append(self.ID_Start, 'start')
        menu.Append(self.ID_Setting, 'settings')
        menu.Append(self.ID_Exit, 'exit')
        return menu
class SettingDialog(wx.Dialog):
    """Modal dialog for remapping the click keys.

    Runs its own keyboard-hook thread with the OnSet handler so that key
    presses are captured directly into the (read-only) text controls; the
    caller must invoke Stop() before destroying the dialog.
    """
    def __init__(self):
        global ENTRIES
        wx.Dialog.__init__(self, None, -1, 'settings', size=(250, 200))
        panel = wx.Panel(self)
        wx.StaticText(panel, -1, 'left click:', pos=(20, 30))
        self.leftClick = wx.TextCtrl(panel, -1, pos=(100, 27), value=str(GLOBAL_SETTINGS['leftclick']), style=wx.TE_READONLY)
        wx.StaticText(panel, -1, 'right click:', pos=(20, 70))
        self.rightClick = wx.TextCtrl(panel, -1, pos=(100, 67), value=str(GLOBAL_SETTINGS['rightclick']), style=wx.TE_READONLY)
        self.hold = wx.CheckBox(panel, -1, 'simulate holding', pos=(60, 110))
        self.hold.SetValue(GLOBAL_SETTINGS['hold'])
        self.hold.Bind(wx.EVT_CHECKBOX, self.OnCheckBox)
        okButton = wx.Button(panel, wx.ID_OK, 'OK', pos=(20, 140))
        okButton.SetDefault()
        cancelButton = wx.Button(panel, wx.ID_CANCEL, 'cancel', pos=(140, 140))
        # OnSet pops from the end of ENTRIES, so the last element is the
        # entry that will receive the next key press (highlighted green).
        ENTRIES = [self.rightClick, self.leftClick]
        ENTRIES[-1].SetBackgroundColour('green')
        self.Start()
    def OnCheckBox(self, event):
        # Applied immediately (not only on OK).
        GLOBAL_SETTINGS['hold'] = self.hold.GetValue()
    def Stop(self):
        # Post WM_QUIT (0x12) to the capture-hook thread to end it.
        win32api.PostThreadMessage(self.hook.ident, 0x12)
    def Start(self):
        # Launch the key-capture hook (OnSet) on a daemon thread.
        self.hook = threading.Thread(target=hook, args=(OnSet,))
        self.hook.daemon = True
        self.hook.start()
def run():
    """Create the wx application and the tray icon, then enter the GUI loop."""
    app = wx.App()
    # Keep a local reference so the tray icon is not garbage-collected.
    taskBar = TaskBarIcon()
    app.MainLoop()
# Start the tray application when executed as a script.
if __name__ == '__main__':
    run()
|
|
#!/usr/bin/env python
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Trove Management Command line tool
"""
import json
import os
import sys
# If ../trove/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
# Resolve the directory two levels above this script; if it contains a
# troveclient package, prepend it to sys.path so a source checkout shadows
# any system-wide installation.
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                   os.pardir,
                                   os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'troveclient',
                               '__init__.py')):
    sys.path.insert(0, possible_topdir)
from troveclient import common
# Module-level option parser; populated in main().
oparser = None
def _pretty_print(info):
print json.dumps(info, sort_keys=True, indent=4)
class HostCommands(common.AuthedCommandsBase):
    """Commands to list info on hosts"""
    # CLI parameters accepted by this command group; the base class turns
    # these into attributes on self.
    params = [
        'name',
    ]
    def update_all(self):
        """Update all instances on a host"""
        self._require('name')
        self.dbaas.hosts.update_all(self.name)
    def get(self):
        """List details for the specified host"""
        self._require('name')
        self._pretty_print(self.dbaas.hosts.get, self.name)
    def list(self):
        """List all compute hosts"""
        self._pretty_list(self.dbaas.hosts.index)
class QuotaCommands(common.AuthedCommandsBase):
    """List and update quota limits for a tenant."""
    # 'id' is the tenant id; the remaining params are quota resources.
    params = ['id',
              'instances',
              'volumes',
              'backups']
    def list(self):
        """List all quotas for a tenant"""
        self._require('id')
        self._pretty_print(self.dbaas.quota.show, self.id)
    def update(self):
        """Update quota limits for a tenant"""
        self._require('id')
        # Forward every non-id parameter as a {resource: new_limit} mapping;
        # unset params are passed through as-is (presumably None).
        self._pretty_print(self.dbaas.quota.update, self.id,
                           dict((param, getattr(self, param))
                                for param in self.params if param != 'id'))
class RootCommands(common.AuthedCommandsBase):
    """List details about the root info for an instance."""
    # CLI parameters accepted by this command group.
    params = [
        'id',
    ]
    def history(self):
        """List root history for the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.root_enabled_history, self.id)
class AccountCommands(common.AuthedCommandsBase):
    """Commands to list account info"""
    # CLI parameters accepted by this command group.
    params = [
        'id',
    ]
    def list(self):
        """List all accounts with non-deleted instances"""
        self._pretty_print(self.dbaas.accounts.index)
    def get(self):
        """List details for the account provided"""
        self._require('id')
        self._pretty_print(self.dbaas.accounts.show, self.id)
class InstanceCommands(common.AuthedCommandsBase):
    """List details about an instance."""
    # CLI parameters accepted by this command group.
    params = [
        'deleted',
        'id',
        'limit',
        'marker',
        'host',
    ]
    def get(self):
        """List details for the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.show, self.id)
    def list(self):
        """List all instances for account"""
        # 'deleted' is tri-state: True, False, or None (no filtering) when
        # the flag is absent or has any other value.
        deleted = None
        if self.deleted is not None:
            if self.deleted.lower() in ['true']:
                deleted = True
            elif self.deleted.lower() in ['false']:
                deleted = False
        self._pretty_paged(self.dbaas.management.index, deleted=deleted)
    def hwinfo(self):
        """Show hardware information details about an instance."""
        self._require('id')
        self._pretty_print(self.dbaas.hwinfo.get, self.id)
    def diagnostic(self):
        """List diagnostic details about an instance."""
        self._require('id')
        self._pretty_print(self.dbaas.diagnostics.get, self.id)
    def stop(self):
        """Stop MySQL on the given instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.stop, self.id)
    def reboot(self):
        """Reboot the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.reboot, self.id)
    def migrate(self):
        """Migrate the instance."""
        self._require('id')
        self._pretty_print(self.dbaas.management.migrate, self.id, self.host)
    def reset_task_status(self):
        """Set the instance's task status to NONE."""
        self._require('id')
        self._pretty_print(self.dbaas.management.reset_task_status, self.id)
class StorageCommands(common.AuthedCommandsBase):
    """Commands to list devices info"""
    # This command group takes no parameters.
    params = []
    def list(self):
        """List details for the storage device"""
        self._pretty_list(self.dbaas.storage.index)
class FlavorsCommands(common.AuthedCommandsBase):
    """Commands for managing Flavors"""
    # CLI parameters accepted by this command group.
    params = [
        'name',
        'ram',
        'disk',
        'vcpus',
        'flavor_id',
        'ephemeral',
        'swap',
        'rxtx_factor',
        'service_type'
    ]
    def create(self):
        """Create a new flavor"""
        # ephemeral/swap/rxtx_factor are optional and forwarded as-is.
        self._require('name', 'ram', 'disk', 'vcpus',
                      'flavor_id', 'service_type')
        self._pretty_print(self.dbaas.mgmt_flavor.create, self.name,
                           self.ram, self.disk, self.vcpus, self.flavor_id,
                           self.ephemeral, self.swap, self.rxtx_factor,
                           self.service_type)
def config_options(oparser):
    """Register tool-wide options on the given optparse parser."""
    oparser.add_option("-u", "--url", default="http://localhost:5000/v1.1",
                       help="Auth API endpoint URL with port and version. \
                    Default: http://localhost:5000/v1.1")
# Mapping of CLI command names to the handler classes defined above.
COMMANDS = {
    'account': AccountCommands,
    'host': HostCommands,
    'instance': InstanceCommands,
    'root': RootCommands,
    'storage': StorageCommands,
    'quota': QuotaCommands,
    'flavor': FlavorsCommands,
}
def main():
    """Entry point: parse CLI arguments and dispatch to a command action."""
    # Parse arguments
    oparser = common.CliOptions.create_optparser(True)
    for k, v in COMMANDS.items():
        v._prepare_parser(oparser)
    (options, args) = oparser.parse_args()
    if not args:
        common.print_commands(COMMANDS)
    # Pop the command and check if it's in the known commands
    cmd = args.pop(0)
    if cmd in COMMANDS:
        fn = COMMANDS.get(cmd)
        command_object = None
        try:
            command_object = fn(oparser)
        except Exception as ex:
            if options.debug:
                raise
            print(ex)
            # NOTE(review): command_object stays None here; presumably
            # print_commands/print_actions exit the process before
            # methods_of(None) below can fail -- confirm in common.
        # Get a list of supported actions for the command
        actions = common.methods_of(command_object)
        if len(args) < 1:
            common.print_actions(cmd, actions)
        # Check for a valid action and perform that action
        action = args.pop(0)
        if action in actions:
            try:
                getattr(command_object, action)()
            except Exception as ex:
                if options.debug:
                    raise
                # Use the print() function form for consistency with the
                # handler above and for Python 3 compatibility (the original
                # used the Python 2 'print ex' statement here).
                print(ex)
        else:
            common.print_actions(cmd, actions)
    else:
        common.print_commands(COMMANDS)
# Standard script entry point.
if __name__ == '__main__':
    main()
|
|
"""
Django settings for kaistusc project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import django_jinja.builtins
import pymysql
pymysql.install_as_MySQLdb()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_dcwta0d49azy*1##hy*j*g)s8d3&q88q(eushtfqrx&ff#auw'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [u'143.248.234.160', u'localhost', u'student.kaist.ac.kr', u'kaistusc.hangpark.com']
CSRF_TRUSTED_ORIGINS = [u'143.248.234.160', u'localhost', u'student.kaist.ac.kr', u'kaistusc.hangpark.com']
# Application definition
INSTALLED_APPS = [
'apps.manager',
'apps.ksso',
'apps.board',
'apps.rule',
'apps.ot',
'modeltranslation',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_jinja',
'rest_framework',
'rest_framework.authtoken',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.locale.LocaleMiddleware',
'middlewares.locale.SessionBasedLocaleMiddleware',
]
ROOT_URLCONF = 'kaistusc.urls'
TEMPLATES = [
{
'BACKEND': 'django_jinja.backend.Jinja2',
'APP_DIRS': True,
'OPTIONS': {
'match_extension': '.jinja',
'extensions': django_jinja.builtins.DEFAULT_EXTENSIONS,
'context_processors': [
'apps.manager.context_processors.development_ip',
]
},
},
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kaistusc.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'kaistusc',
'USER': os.getenv('MYSQL_USER'),
'PASSWORD': os.getenv('MYSQL_PASSWORD'),
'HOST': os.getenv('MYSQL_HOST') or 'db',
'PORT': '3306'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
LANGUAGES = (
('ko', 'Korean'),
('en', 'English'),
)
USE_I18N = True
USE_L10N = True
USE_TZ = False
TIME_ZONE = 'Asia/Seoul'
LOCALE_PATHS = [
os.path.join(BASE_DIR, "locale"),
]
MODELTRANSLATION_FALLBACK_LANGUAGES = ('ko', 'en')
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static/dist"),
os.path.join(BASE_DIR, "node_modules/pdfjs-dist"),
)
STATIC_ROOT = '/var/www/static'
MEDIA_URL = '/files/'
MEDIA_ROOT = '/var/www/media'
# Session Management
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Authentication
LOGIN_URL = '/user/login/'
# http://www.django-rest-framework.org/
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
),
}
# Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'file': {
'format': '[%(asctime)s] (%(levelname)s) %(message)s',
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'file': {
'level': 'WARNING',
'formatter': 'file',
'filters': ['require_debug_false'],
'class': 'logging.handlers.WatchedFileHandler',
'filename': os.path.join(BASE_DIR, 'kaistusc.log'),
},
},
'loggers': {
'django': {
'handlers': ['console', 'file'],
'level': 'INFO',
},
'django.request': {
'handlers': ['console', 'file'],
'level': 'WARNING',
'propagate': False,
},
}
}
try:
from .local_settings import *
except ImportError:
pass
|
|
from .trainer import Trainer
import numpy as np
import torch
from torch.autograd import Variable
class WassersteinGanTrainer(Trainer):
    """
    Args:
        network (nn.Module): the network to train
        latent_dimension (tuple): A tuple that defines the shape of the latent
            dimension (noise) that is the generator's input
        n_critic_iterations (int): The number of minibatches the critic sees
            for every minibatch the generator sees
        epochs: The total number of passes over the training set
        batch_size: The size of a minibatch
        preprocess_minibatch (function): function that takes the current
            epoch, and a minibatch, and mutates the minibatch
        kwargs_factory (callable): function that takes the current epoch and
            outputs args to pass to the generator and discriminator

    Notes:
        Implements WGAN with gradient penalty (the critic loss adds a
        unit-norm gradient penalty term, see _gradient_penalty).
        Uses ``Variable``/``volatile=True``/``.data`` -- presumably written
        for the pre-0.4 PyTorch API; confirm the torch version before
        modifying. ``use_cuda``, ``_tensor``, ``_variable``,
        ``_current_epoch``, ``register_batch_complete_callback`` and
        ``on_batch_complete`` are provided by the Trainer base class (not
        visible here).
    """
    def __init__(
            self,
            network,
            latent_dimension,
            n_critic_iterations,
            epochs,
            batch_size,
            preprocess_minibatch=None,
            kwargs_factory=None,
            debug_gradient=False,
            checkpoint_epochs=1):
        super(WassersteinGanTrainer, self).__init__(epochs, batch_size)
        self.checkpoint_epochs = checkpoint_epochs
        self.debug_gradient = debug_gradient
        self.arg_maker = kwargs_factory
        self.preprocess = preprocess_minibatch
        self.n_critic_iterations = n_critic_iterations
        self.latent_dimension = latent_dimension
        self.network = network
        # The network object exposes both halves of the GAN.
        self.critic = network.discriminator
        self.generator = network.generator
        # Latest generator output, forwarded to batch-complete callbacks.
        self.samples = None
        self.register_batch_complete_callback(self._log)
        # Optimizers are created lazily in _init_optimizers().
        self.generator_optim = None
        self.critic_optim = None
    def _log(self, *args, **kwargs):
        # Only log every 10th batch.
        if kwargs['batch'] % 10:
            return
        msg = 'Epoch {epoch}, batch {batch}, generator {generator_score}, ' \
              'real {real_score}, critic {critic_loss}'
        print(msg.format(**kwargs))
    def _minibatch(self, data):
        # Random minibatch sampled with replacement.
        indices = np.random.randint(0, len(data), self.batch_size)
        return data[indices, ...]
    def _gradient_penalty(self, real_samples, fake_samples, kwargs):
        """
        Compute the norm of the gradients for each sample in a batch, and
        penalize anything on either side of unit norm
        """
        import torch
        from torch.autograd import Variable, grad
        real_samples = real_samples.view(fake_samples.shape)
        # subset_size equals the full batch, so the two slices below are
        # currently no-ops.
        subset_size = real_samples.shape[0]
        real_samples = real_samples[:subset_size]
        fake_samples = fake_samples[:subset_size]
        # Random per-sample interpolation weights, broadcast over all
        # non-batch dimensions.
        alpha = torch.rand(subset_size)
        if self.use_cuda:
            alpha = alpha.cuda()
        alpha = alpha.view((-1,) + ((1,) * (real_samples.dim() - 1)))
        interpolates = alpha * real_samples + ((1 - alpha) * fake_samples)
        interpolates = Variable(interpolates, requires_grad=True)
        if self.use_cuda:
            interpolates = interpolates.cuda()
        d_output = self.critic(interpolates, **kwargs)
        grad_ouputs = torch.ones(d_output.size())
        if self.use_cuda:
            grad_ouputs = grad_ouputs.cuda()
        gradients = grad(
            outputs=d_output,
            inputs=interpolates,
            grad_outputs=grad_ouputs,
            create_graph=True,
            retain_graph=True,
            only_inputs=True)[0]
        # Penalty weight is the constant 10.
        return ((gradients.norm(2, dim=1) - 1) ** 2).mean() * 10
    def freeze_generator(self):
        # Disable gradient computation for all generator parameters.
        for p in self.generator.parameters():
            p.requires_grad = False
    def unfreeze_generator(self):
        for p in self.generator.parameters():
            p.requires_grad = True
    def freeze_discriminator(self):
        # Disable gradient computation for all critic parameters.
        for p in self.critic.parameters():
            p.requires_grad = False
    def unfreeze_discriminator(self):
        for p in self.critic.parameters():
            p.requires_grad = True
    def _debug_network_gradient(self, network):
        # Optional gradient statistics dump, controlled by debug_gradient.
        if not self.debug_gradient:
            return
        for n, p in network.named_parameters():
            g = p.grad
            if g is not None:
                print((n, g.min().data[0], g.max().data[0], g.mean().data[0]))
    def zero_generator_gradients(self):
        self._debug_network_gradient(self.generator)
        self.generator.zero_grad()
    def zero_discriminator_gradients(self):
        self._debug_network_gradient(self.critic)
        self.critic.zero_grad()
    def _init_optimizers(self):
        # Build Adam optimizers (lr=1e-4, betas=(0, 0.9)) once; reuse them
        # across checkpointed train() calls.
        if self.generator_optim is None or self.critic_optim is None:
            from torch.optim import Adam
            trainable_generator_params = (
                p for p in self.generator.parameters() if p.requires_grad)
            trainable_critic_params = (
                p for p in self.critic.parameters() if p.requires_grad)
            self.generator_optim = Adam(
                trainable_generator_params, lr=0.0001, betas=(0, 0.9))
            self.critic_optim = Adam(
                trainable_critic_params, lr=0.0001, betas=(0, 0.9))
    def _cuda(self, device=None):
        self.generator = self.generator.cuda()
        self.critic = self.critic.cuda()
    def train(self, data):
        """Run up to checkpoint_epochs epochs of WGAN-GP training on data
        and return the network."""
        self.network.train()
        self.unfreeze_discriminator()
        self.unfreeze_generator()
        data = data.astype(np.float32)
        # One reusable noise buffer, refilled in-place each step.
        noise_shape = (self.batch_size,) + self.latent_dimension
        noise = self._tensor(noise_shape)
        self._init_optimizers()
        start = self._current_epoch
        stop = self._current_epoch + self.checkpoint_epochs
        for epoch in range(start, stop):
            if epoch >= self.epochs:
                break
            if self.arg_maker:
                kwargs = self.arg_maker(epoch)
            else:
                kwargs = dict()
            for i in range(0, len(data), self.batch_size):
                self.zero_generator_gradients()
                self.zero_discriminator_gradients()
                # Critic phase: train only the critic for
                # n_critic_iterations minibatches.
                self.freeze_generator()
                self.unfreeze_discriminator()
                for c in range(self.n_critic_iterations):
                    self.zero_discriminator_gradients()
                    input_v = self._variable(self._minibatch(data))
                    if self.preprocess:
                        input_v = self.preprocess(epoch, input_v)
                    d_real = self.critic.forward(input_v, **kwargs)
                    # train discriminator on fake data
                    noise.normal_(0, 1)
                    # volatile marks the generator pass as inference-only
                    # (pre-0.4 PyTorch API); the result is detached by
                    # re-wrapping its .data below.
                    noise_v = Variable(noise, volatile=True)
                    fake = Variable(
                        self.generator.forward(noise_v, **kwargs).data)
                    if self.preprocess:
                        fake = self.preprocess(epoch, fake)
                    d_fake = self.critic.forward(fake, **kwargs)
                    real_mean = torch.mean(d_real)
                    fake_mean = torch.mean(d_fake)
                    gp = self._gradient_penalty(input_v.data, fake.data, kwargs)
                    # Wasserstein critic loss plus gradient penalty.
                    d_loss = (fake_mean - real_mean) + gp
                    d_loss.backward()
                    self.critic_optim.step()
                self.zero_discriminator_gradients()
                self.zero_generator_gradients()
                # Generator phase: one step with the critic frozen.
                self.unfreeze_generator()
                self.freeze_discriminator()
                # train generator
                noise.normal_(0, 1)
                noise_v = Variable(noise)
                fake = self.generator.forward(noise_v, **kwargs)
                if self.preprocess:
                    fake = self.preprocess(epoch, fake)
                self.samples = fake
                d_fake = self.critic.forward(fake, **kwargs)
                g_loss = -torch.mean(d_fake)
                g_loss.backward()
                self.generator_optim.step()
                gl = g_loss.data.item()
                dl = d_loss.data.item()
                rl = real_mean.data.item()
                self.on_batch_complete(
                    epoch=epoch,
                    batch=i,
                    generator_score=gl,
                    real_score=rl,
                    critic_loss=dl,
                    samples=self.samples,
                    network=self.network)
            self._current_epoch += 1
        return self.network
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: set ts=2 sw=2 noet:
'''Various string representations of trees.'''
import pstree, parse_errors
def text_words(tree, show_traces=False):
    '''Return the terminal words of the tree, joined by single spaces.'''
    words = []
    for leaf in tree:
        if not leaf.is_terminal():
            continue
        if leaf.is_trace() and not show_traces:
            continue
        words.append(leaf.word)
    return ' '.join(words)
def text_POS_tagged(tree, show_traces=False):
    '''Print words and part of speech tags in the tree.

    Returns "word|tag" pairs for the terminal nodes, joined by spaces.
    Bug fix: the original appended ``tree.word``/``tree.label`` (the root's
    attributes) instead of the current node's, so every pair was wrong.
    '''
    text = []
    for node in tree:
        if node.is_terminal():
            if node.is_trace() and not show_traces:
                continue
            text.append(node.word + '|' + node.label)
    return ' '.join(text)
def text_tree(tree, single_line=True, show_traces=False, depth=0):
    '''Render the tree as bracketed text, either on one line or with one
    tab-indented line per non-root subtree.'''
    prefix = ''
    if depth > 0 and not single_line:
        prefix = '\n' + '\t' * depth
    pieces = [prefix, '(', tree.label]
    if tree.word is not None:
        pieces.append(' ' + tree.word)
    for child in tree.subtrees:
        if single_line:
            pieces.append(' ')
        pieces.append(text_tree(child, single_line, show_traces, depth + 1))
    pieces.append(')')
    return ''.join(pieces)
def text_ontonotes(tree, filename='filename', words=None, tree_text=None, depth=0):
    '''Render the tree in OntoNotes column format.

    The function is called recursively; only the outermost call (where
    ``words`` is None) resolves the accumulated skeleton into the final
    multi-line string, one row per word.
    '''
    # 'resolve' is True only for the top-level invocation.
    resolve = False
    if words is None:
        resolve = True
        words = []
        tree_text = ''
    # Internal nodes contribute '(LABEL_'; terminals contribute '*' and
    # record their (word, POS) pair.
    if tree.word is None:
        tree_text += '(' + tree.label + '_'
    else:
        words.append((tree.word, tree.label))
        tree_text += '*'
    for subtree in tree.subtrees:
        tree_text = text_ontonotes(subtree, filename, words, tree_text, depth)
    if tree.word is None:
        tree_text += ')'
    if resolve:
        # Split the skeleton at each '*' (one per word) and emit a column
        # row for every word together with its slice of the skeleton.
        ans = ''
        cpos = 0
        cword = 0
        while cpos < len(tree_text):
            ctext = ''
            while cpos < len(tree_text) and tree_text[cpos] != '*':
                ctext += tree_text[cpos]
                cpos += 1
            ctext += tree_text[cpos]
            cpos += 1
            while cpos < len(tree_text) and tree_text[cpos] == ')':
                ctext += tree_text[cpos]
                cpos += 1
            ans += '%s %9s %9d %9s %9s %9s' % (filename, 0, cword, words[cword][0], words[cword][1], ctext)
            # Placeholder columns (sense, speaker, NE, coref, ...).
            for val in ['-', '-', '-', '-', '*', '*', '*', '*', '*', '*', '-']:
                ans += ' %9s' % val
            ans += '\n'
            cword += 1
        return ans
    else:
        return tree_text
def tex_synttree(tree, other_spans=None, depth=0, compressed=True, span=None):
    '''Render the tree as LaTeX \\synttree markup.

    ``other_spans`` is a set of (label, start, end) triples from a reference
    tree; nodes absent from it are wrapped in \\wrongnode.  When
    ``compressed`` is True, fully-correct subtrees are collapsed to their
    word yield.  ``span`` restricts output to nodes overlapping that range.
    '''
    if tree.label == '.':
        return ''
    # Skip nodes entirely outside the requested span.
    if span is not None and (tree.span[1] <= span[0] or tree.span[0] >= span[1]):
        return ''
    correct = True
    if other_spans is not None:
        correct = (tree.label, tree.span[0], tree.span[1]) in other_spans
    else:
        compressed = False
    # If one child fully covers the requested span, this node's own
    # brackets are omitted.
    all_in_subtree = False
    if span is not None:
        for subtree in tree.subtrees:
            if subtree.span[0] <= span[0] and span[1] <= subtree.span[1]:
                all_in_subtree = True
    # Clean the label and word
    label = tree.label
    if '$' in label:
        label = '\$'.join(label.split('$'))
    word = tree.word
    if word is not None:
        word = ''.join(word.split('.'))
        word = '\&'.join(word.split('&'))
        word = '\$'.join(word.split('$'))
    # Make the text
    ans = ''
    if tree.parent is None:
        ans += '\synttree'
        if not all_in_subtree:
            ans += '\n'
    elif not all_in_subtree:
        ans += '\n' + ' ' * depth
    if len(tree.subtrees) == 0:
        ans += '[%s [%s]]' % (label, word)
    else:
        if not all_in_subtree:
            if correct:
                ans += '[%s' % (label)
            else:
                ans += '[\wrongnode{%s}' % (label)
        for subtree in tree.subtrees:
            ans += tex_synttree(subtree, other_spans, depth + 1, compressed, span)
        if not all_in_subtree:
            ans += ']'
    # When compressing we only want errors visible
    if compressed and 'wrongnode' not in ans and tree.word is None:
        words = ''.join(tree.word_yield().split('.'))
        words = '\&'.join(words.split('&'))
        words = '\$'.join(words.split('$'))
        if tree.parent is None:
            ans = '\synttree\n'
        else:
            ans = '\n' + ' ' * depth
        ans += '[%s [.t %s]]' % (label, words)
    return ans
def text_coloured_errors(tree, gold=None, depth=0, single_line=False, missing=None, extra=None, compressed=True, POS=True):
    '''Pretty print, with errors marked using colour.
    'missing' should contain tuples (or be None):
    (start, end, label, crossing-T/F)

    Cyan marks missing brackets, red marks extra brackets, yellow marks
    crossing brackets (ANSI escape codes).  Note: uses xrange, so this
    module targets Python 2.
    '''
    ans = ''
    if missing is None or extra is None:
        if gold is None:
            return "Error - no gold tree and no missing list for colour repr"
        # look at gold and work out what missing should be
        errors = parse_errors.get_errors(tree, gold, POS)
        extra = [e[3] for e in errors if e[0] == 'extra' and e[3].word is None]
        extra = set(extra)
        missing = [(e[1][0], e[1][1], e[2], False) for e in errors if e[0] == 'missing' and e[3].word is None]
        missing += [(e[1][0], e[1][1], e[2], True) for e in errors if e[0] == 'crossing' and e[3].word is None]
        # POS is repurposed: from here on it holds the list of POS errors.
        POS = [e for e in errors if e[0] == 'diff POS']
    start_missing = "\033[01;36m"
    start_extra = "\033[01;31m"
    start_crossing = "\033[01;33m"
    end_colour = "\033[00m"
    if not single_line:
        ans += '\n' + depth * '\t'
    # start of this
    if tree in extra:
        ans += start_extra + '(' + tree.label + end_colour
    elif tree.word is not None and POS is not None:
        # Terminal with a possible POS mismatch: show gold tag in cyan and
        # the (wrong) test tag in red.
        found = False
        for error in POS:
            if error[3] == tree:
                found = True
                ans += '(' + start_missing + error[4] + end_colour
                ans += ' ' + start_extra + tree.label + end_colour
                break
        if not found:
            ans += '(' + tree.label
    else:
        ans += '(' + tree.label
    # If we are compressing, check for correctness and then just print words
    sub_done = False
    if compressed and tree not in extra and tree.word is None:
        all_right = True
        for error in extra:
            if tree.span[0] <= error.span[0] and error.span[1] <= tree.span[1]:
                all_right = False
                break
        for error in missing:
            if error[3]:
                # Crossing bracket: any endpoint strictly inside this span
                # prevents compression.
                if tree.span[0] < error[0] < tree.span[1]:
                    all_right = False
                    break
                if tree.span[0] < error[1] < tree.span[1]:
                    all_right = False
                    break
            elif tree.span[0] <= error[0] and error[1] <= tree.span[1]:
                all_right = False
                break
        if POS is not None:
            for error in POS:
                if tree.span[0] <= error[1][0] and error[1][1] <= tree.span[1]:
                    all_right = False
                    break
        if all_right:
            ans += ' ' + text_words(tree) + ')'
            sub_done = True
    # crossing brackets starting
    if tree.parent is None or tree.parent.subtrees[0] != tree:
        # these are marked as high as possible
        labels = []
        for error in missing:
            if error[0] == tree.span[0] and error[3]:
                labels.append((error[1], error[2]))
        labels.sort(reverse=True)
        if len(labels) > 0:
            to_add = start_crossing + ' '.join(['(' + label[1] for label in labels]) + end_colour
            if sub_done:
                # Splice the crossing-bracket markers in after the leading
                # whitespace of the already-rendered compressed node.
                nans = ''
                for char in ans:
                    if char in '\t\n':
                        nans += char
                clen = len(nans)
                nans += to_add
                nans += ' ' + ans[clen:]
                ans = nans
            else:
                ans += ' ' + to_add
    if not sub_done:
        # word
        if tree.word is not None:
            ans += ' ' + tree.word
        # subtrees
        below = []
        for subtree in tree.subtrees:
            text = text_coloured_errors(subtree, gold, depth + 1, single_line, missing, extra, compressed, POS)
            if single_line:
                text = ' ' + text
            below.append([subtree.span[0], subtree.span[1], text])
        # add missing brackets that surround subtrees
        for length in xrange(1, len(below)):
            for i in xrange(len(below)):
                j = i + length
                if i == 0 and j == len(below) - 1:
                    continue
                if j >= len(below):
                    break
                for error in missing:
                    if below[i][0] == error[0] and below[j][1] == error[1] and not error[3]:
                        start = ''
                        for char in below[i][2]:
                            if char not in '\n\t':
                                break
                            start += char
                        # Indent the wrapped children one level deeper.
                        for k in xrange(i, j+1):
                            below[k][2] = '\n\t'.join(below[k][2].split('\n'))
                        below[i][2] = start + start_missing + '(' + error[2] + end_colour + below[i][2]
                        below[j][2] += start_missing + ')' + end_colour
        ans += ''.join([part[2] for part in below])
    # end of this
    if tree in extra:
        ans += start_extra + ')' + end_colour
    else:
        ans += ')'
    if tree.parent is None or tree.parent.subtrees[-1] != tree:
        # if there are crossing brackets that end here, mark that
        labels = []
        for error in missing:
            if error[1] == tree.span[1] and error[3]:
                labels.append((-error[0], error[2]))
        labels.sort()
        if len(labels) > 0:
            ans += ' ' + start_crossing + ' '.join([label[1] + ')' for label in labels]) + end_colour
    if tree.parent is None or len(tree.parent.subtrees) > 1:
        # check for missing brackets that go around this node
        for error in missing:
            if error[0] == tree.span[0] and error[1] == tree.span[1] and not error[3]:
                if not tree in extra:
                    # Put them on a new level
                    extra_text = ''
                    if not single_line:
                        ans = '\n\t'.join(ans.split('\n'))
                        extra_text = '\n' + depth * '\t'
                    extra_text += start_missing + '(' + error[2] + end_colour
                    if single_line:
                        ans = ' ' + ans
                    ans = extra_text + ans
                    ans += start_missing + ')' + end_colour
                else:
                    # Put them on the same line
                    start = 0
                    for char in ans:
                        if char not in '\n\t':
                            break
                        start += 1
                    pretext = ans[:start]
                    ans = ans[start:]
                    extra_text = start_missing + '(' + error[2] + end_colour + ' '
                    ans = pretext + extra_text + ans
                    ans += start_missing + ')' + end_colour
    return ans
def cut_text_below(text, depth):
    '''Simplify text to only show the top parts of a tree
    >>> print(cut_text_below("(ROOT (NP (PRP I)) (VP (VBD ran) (NP (NN home))))", 1))
    (ROOT)
    >>> print(cut_text_below("(ROOT (NP (PRP I)) (VP (VBD ran) (NP (NN home))))", 2))
    (ROOT (NP) (VP))
    >>> print(cut_text_below("(ROOT (NP (PRP I)) (VP (VBD ran) (NP (NN home))))", 3))
    (ROOT (NP (PRP I)) (VP (VBD ran) (NP)))
    >>> print(cut_text_below("(ROOT (NP (PRP I)) (VP (VBD ran) (NP (NN home))))", 20))
    (ROOT (NP (PRP I)) (VP (VBD ran) (NP (NN home))))
    '''
    # First pass: keep only characters at or above the requested depth.
    level = 0
    kept = []
    for ch in text:
        if ch == '(':
            level += 1
        if level <= depth:
            kept.append(ch)
        if ch == ')':
            level -= 1
    # Second pass (right to left): drop the space left behind just before
    # each closing bracket.
    trimmed = []
    skip_space = False
    for ch in reversed(kept):
        if ch == ')':
            skip_space = True
            trimmed.append(ch)
        elif skip_space:
            if ch != ' ':
                trimmed.append(ch)
                skip_space = False
        else:
            trimmed.append(ch)
    return ''.join(reversed(trimmed))
if __name__ == '__main__':
    # Run the embedded doctests when executed directly.  The parenthesized
    # print form behaves identically under Python 2 and 3.
    print("Running doctest")
    import doctest
    doctest.testmod()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import with_statement
import argparse as _argparse
import os as _os
import plano as _plano
import shlex as _shlex
import subprocess as _subprocess
import time as _time
import traceback as _traceback
from .common import *
from .common import __version__
_description = """
Benchmark message sender, receiver, and server combinations.
'quiver-bench' is one of the Quiver tools for testing the performance
of message servers and APIs.
"""
_epilog = """
The --include-* and --exclude-* arguments take comma-separated lists
of implementation names. Use 'quiver-arrow --help' and
'quiver-server --help' to list the available implementations.
"""
class QuiverBenchCommand(Command):
    def __init__(self, home_dir):
        """Register bench-specific command line options on the parser
        provided by the Command base class."""
        super(QuiverBenchCommand, self).__init__(home_dir)
        self.parser.description = _description.lstrip()
        self.parser.epilog = _epilog.lstrip()
        self.parser.add_argument("--output", metavar="DIR",
                                 help="Save output files to DIR")
        # Implementation filters; 'all'/'none' are sentinels meaning no
        # filtering (see init_impl_attributes).
        self.parser.add_argument("--include-senders", metavar="IMPLS",
                                 help="Test only senders in IMPLS",
                                 default="all")
        self.parser.add_argument("--include-receivers", metavar="IMPLS",
                                 help="Test only receivers in IMPLS",
                                 default="all")
        self.parser.add_argument("--include-servers", metavar="IMPLS",
                                 help="Test only servers in IMPLS",
                                 default="all")
        self.parser.add_argument("--exclude-senders", metavar="IMPLS",
                                 help="Do not test senders in IMPLS",
                                 default="none")
        self.parser.add_argument("--exclude-receivers", metavar="IMPLS",
                                 help="Do not test receivers in IMPLS",
                                 default="none")
        self.parser.add_argument("--exclude-servers", metavar="IMPLS",
                                 help="Do not test servers in IMPLS",
                                 default="none")
        self.parser.add_argument("--client-server", action="store_true",
                                 help="Test only client-server mode")
        self.parser.add_argument("--peer-to-peer", action="store_true",
                                 help="Test only peer-to-peer mode")
        self.parser.add_argument("--mixed-pairs", action="store_true",
                                 help="Test unmatched senders and receivers")
        self.add_common_test_arguments()
        self.add_common_tool_arguments()
    def init(self):
        """Derive the run configuration from the parsed arguments."""
        super(QuiverBenchCommand, self).init()
        self.output_dir = self.args.output
        if self.output_dir is None:
            # Default output dir name: "<program-name>-<YYYY-MM-DD>".
            prefix = _plano.program_name()
            datestamp = _time.strftime('%Y-%m-%d', _time.localtime())
            self.output_dir = "{}-{}".format(prefix, datestamp)
        _plano.make_dir(self.output_dir)
        # Both modes run by default; each flag restricts to one mode.
        self.client_server = True
        self.peer_to_peer = True
        if self.args.client_server:
            self.peer_to_peer = False
        if self.args.peer_to_peer:
            self.client_server = False
        self.mixed_pairs = self.args.mixed_pairs
        self.init_impl_attributes()
        self.init_common_test_attributes()
        self.init_common_tool_attributes()
        if not self.verbose:
            _plano.enable_logging("warn")
        # Accumulates per-test failure summaries, reported at the end of run().
        self.failures = list()
def init_impl_attributes(self):
sender_impls = set(ARROW_IMPLS)
receiver_impls = set(ARROW_IMPLS)
server_impls = set(SERVER_IMPLS)
if self.args.include_senders != "all":
sender_impls = self.parse_impls(self.args.include_senders)
if self.args.include_receivers != "all":
receiver_impls = self.parse_impls(self.args.include_receivers)
if self.args.include_servers != "all":
server_impls = self.parse_impls(self.args.include_servers)
if self.args.exclude_senders != "none":
sender_impls -= self.parse_impls(self.args.exclude_senders)
if self.args.exclude_receivers != "none":
receiver_impls -= self.parse_impls(self.args.exclude_receivers)
if self.args.exclude_servers != "none":
server_impls -= self.parse_impls(self.args.exclude_servers)
for name in list(sender_impls):
if not impl_available(name):
_plano.notice("Sender '{}' is unavailable", name)
sender_impls.remove(name)
for name in list(receiver_impls):
if not impl_available(name):
_plano.notice("Receiver '{}' is unavailable", name)
receiver_impls.remove(name)
for name in list(server_impls):
if not impl_available(name):
_plano.notice("Server '{}' is unavailable", name)
server_impls.remove(name)
self.sender_impls = sorted(sender_impls)
self.receiver_impls = sorted(receiver_impls)
self.server_impls = sorted(server_impls)
def parse_impls(self, value):
return {x for x in value.split(",")}
    def run(self):
        """Execute the benchmark matrix, print a summary, and exit 1 on failure."""
        if self.client_server:
            for sender_impl in self.sender_impls:
                for receiver_impl in self.receiver_impls:
                    if not self.mixed_pairs and sender_impl != receiver_impl:
                        continue
                    # Openwire arrows can only be paired with other
                    # openwire arrows
                    if sender_impl in OPENWIRE_ARROW_IMPLS:
                        if receiver_impl not in OPENWIRE_ARROW_IMPLS:
                            continue
                    if receiver_impl in OPENWIRE_ARROW_IMPLS:
                        if sender_impl not in OPENWIRE_ARROW_IMPLS:
                            continue
                    # Likewise, core-protocol arrows only pair with
                    # core-protocol arrows
                    if sender_impl in CORE_PROTOCOL_ARROW_IMPLS:
                        if receiver_impl not in CORE_PROTOCOL_ARROW_IMPLS:
                            continue
                    if receiver_impl in CORE_PROTOCOL_ARROW_IMPLS:
                        if sender_impl not in CORE_PROTOCOL_ARROW_IMPLS:
                            continue
                    for server_impl in self.server_impls:
                        # The server must speak the pair's protocol
                        if sender_impl in OPENWIRE_ARROW_IMPLS:
                            if server_impl not in OPENWIRE_SERVER_IMPLS:
                                continue
                        if sender_impl in CORE_PROTOCOL_ARROW_IMPLS:
                            if server_impl not in CORE_PROTOCOL_SERVER_IMPLS:
                                continue
                        self.run_test(sender_impl, server_impl, receiver_impl)
        if self.peer_to_peer:
            for sender_impl in self.sender_impls:
                # Openwire and core-protocol arrows are skipped here:
                # they are not run without a server
                if sender_impl in OPENWIRE_ARROW_IMPLS:
                    continue
                if sender_impl in CORE_PROTOCOL_ARROW_IMPLS:
                    continue
                for receiver_impl in self.receiver_impls:
                    if not self.mixed_pairs:
                        if sender_impl != receiver_impl:
                            continue
                    if receiver_impl not in PEER_TO_PEER_ARROW_IMPLS:
                        continue
                    # No server: server_impl is None for peer-to-peer runs
                    self.run_test(sender_impl, None, receiver_impl)
        print("Test failures: {}".format(len(self.failures)))
        for failure in self.failures:
            print(failure) # Need summary
        if len(self.failures) > 0:
            _plano.exit(1)
    def run_test(self, sender_impl, server_impl, receiver_impl):
        """Run one sender/server/receiver combination.

        A ``server_impl`` of ``None`` means peer-to-peer (no intermediary
        server). Failures are appended to ``self.failures`` rather than
        raised, so the matrix keeps running.
        """
        peer_to_peer = server_impl is None
        port = _plano.random_port()
        server = None
        if server_impl == "activemq":
            if sender_impl == "activemq-jms" and receiver_impl == "activemq-jms":
                # JMS-to-JMS uses ActiveMQ's default (OpenWire) port
                port = 61616
            else:
                # Otherwise use the standard AMQP port
                port = 5672
        if peer_to_peer:
            summary = "{} -> {} ".format(sender_impl, receiver_impl)
            server_name = "none"
        else:
            summary = "{} -> {} -> {} ".format(sender_impl, server_impl, receiver_impl)
            server_name = server_impl
        test_dir = _plano.join(self.output_dir, sender_impl, server_name, receiver_impl)
        pair_dir = _plano.join(test_dir, "pair")
        server_dir = _plano.join(test_dir, "server")
        pair = _TestPair(self, pair_dir, sender_impl, receiver_impl, peer_to_peer)
        if not peer_to_peer:
            server = _TestServer(server_dir, server_impl)
        if not self.verbose and not self.quiet:
            # Fixed-width dotted leader so PASSED/FAILED lines up in columns
            print("{:.<111} ".format(summary), end="")
            _plano.flush()
        if server is not None:
            try:
                server.start(port)
            except _Timeout as e:
                self.failures.append(str(e)) # XXX capture the combo
                if self.verbose:
                    _plano.error(str(e))
                else:
                    print("FAILED")
                if server is not None:
                    server.print_summary()
                # NOTE(review): control falls through and still runs the
                # pair against the unstarted server -- confirm this is
                # intended (an early return here may be missing)
        try:
            pair.run(port, self.args)
            if not self.verbose and not self.quiet:
                print("PASSED")
        except KeyboardInterrupt:
            raise
        except _plano.CalledProcessError as e:
            self.failures.append(str(e)) # XXX capture the combo
            if self.verbose:
                _plano.error(str(e))
            elif not self.quiet:
                print("FAILED")
            pair.print_summary()
            if server is not None:
                server.print_summary()
        except:
            # Unexpected error: report it but keep the benchmark running
            _traceback.print_exc()
        finally:
            _plano.flush()
            if server is not None:
                server.stop()
        self.report(pair, server)
    def report(self, pair, server):
        # Hook for per-test reporting; intentionally a no-op for now
        pass
class _TestPair:
    """A single quiver sender/receiver run with its output captured on disk.

    The command line, combined stdout/stderr, and a PASSED/FAILED marker
    are written to files under ``output_dir``.
    """
    def __init__(self, command, output_dir, sender_impl, receiver_impl, peer_to_peer):
        # `command` is the parent command object; its args/verbose are read in run()
        self.command = command
        self.output_dir = output_dir
        self.sender_impl = sender_impl
        self.receiver_impl = receiver_impl
        self.peer_to_peer = peer_to_peer
        self.command_file = _plano.join(self.output_dir, "command.txt")
        self.output_file = _plano.join(self.output_dir, "output.txt")
        self.status_file = _plano.join(self.output_dir, "status.txt")
    def run(self, port, args):
        """Invoke the ``quiver`` command for this pair, recording the result.

        Writes FAILED to the status file and re-raises if the call fails;
        otherwise writes PASSED.
        """
        _plano.make_dir(self.output_dir)
        command = [
            "quiver", "//127.0.0.1:{}/q0".format(port),
            "--sender", self.sender_impl,
            "--receiver", self.receiver_impl,
            "--count", args.count,
            "--duration", args.duration,
            "--body-size", args.body_size,
            "--credit", args.credit,
            "--timeout", args.timeout,
        ]
        if self.peer_to_peer:
            command += ["--peer-to-peer"]
        if self.command.verbose:
            command += ["--verbose"]
        command += [
            "--output", self.output_dir,
        ]
        # Record the exact command line before running it
        _plano.write(self.command_file, "{}\n".format(" ".join(command)))
        with open(self.output_file, "w") as f:
            try:
                # stdout and stderr are merged into the output file
                _plano.call(command, stdout=f, stderr=f)
            except:
                _plano.write(self.status_file, "FAILED\n")
                raise
        _plano.write(self.status_file, "PASSED\n")
    def print_summary(self):
        """Print the recorded command line and its output, '>'-prefixed."""
        print("--- Test command ---")
        print("> {}".format(_plano.read(self.command_file)), end="")
        print("--- Test output ---")
        for line in _plano.read_lines(self.output_file):
            print("> {}".format(line), end="")
class _TestServer:
    """A quiver-server process under test, with its output captured on disk."""
    def __init__(self, output_dir, impl):
        self.output_dir = output_dir
        self.impl = impl
        # The server writes "ready\n" to this file once it is listening
        self.ready_file = _plano.make_temp_file()
        self.command_file = _plano.join(self.output_dir, "command.txt")
        self.output_file = _plano.join(self.output_dir, "output.txt")
        self.status_file = _plano.join(self.output_dir, "status.txt")
        # Open output file handle and process handle; set in start()
        self.output = None
        self.proc = None
    def start(self, port):
        """Launch quiver-server and wait for it to report ready.

        Polls the ready file 30 times at 0.2 s intervals (about 6 s).

        Raises:
            _Timeout: if the server never reports ready.
        """
        assert self.proc is None
        _plano.make_dir(self.output_dir)
        self.output = open(self.output_file, "w")
        command = [
            "quiver-server", "//127.0.0.1:{}/q0".format(port),
            "--impl", self.impl,
            "--ready-file", self.ready_file,
            "--verbose",
        ]
        _plano.write(self.command_file, "{}\n".format(" ".join(command)))
        self.proc = _plano.start_process(command, stdout=self.output, stderr=self.output)
        for i in range(30):
            if _plano.read(self.ready_file) == "ready\n":
                break
            _plano.sleep(0.2)
        else:
            raise _Timeout("Timed out waiting for server to be ready")
    def stop(self):
        """Stop the server process and record PASSED/FAILED in the status file."""
        assert self.proc is not None
        _plano.stop_process(self.proc)
        self.output.close()
        # Exit codes 1-127 count as failure; anything else (0, or a
        # signal-terminated code) is treated as a clean stop, since we
        # stop the server ourselves
        if self.proc.returncode > 0 and self.proc.returncode < 128:
            _plano.write(self.status_file, "FAILED\n")
        else:
            _plano.write(self.status_file, "PASSED\n")
        _plano.remove(self.ready_file)
    def print_summary(self):
        """Print the recorded server command line and output, '>'-prefixed."""
        print("--- Server command ---")
        print("> {}".format(_plano.read(self.command_file)), end="")
        print("--- Server output ---")
        for line in _plano.read_lines(self.output_file):
            print("> {}".format(line), end="")
class _Timeout(Exception):
    """Raised when a test server does not become ready within the wait limit."""
    pass
|
|
import collections
import os
import sys
import numpy
try:
from PIL import Image
available = True
except ImportError as e:
available = False
_import_error = e
import chainer
from chainer.dataset.convert import concat_examples
from chainer.dataset import download
from chainer import function
from chainer.functions.activation.relu import relu
from chainer.functions.activation.softmax import softmax
from chainer.functions.array.reshape import reshape
from chainer.functions.math.average import average
from chainer.functions.noise.dropout import dropout
from chainer.functions.normalization.local_response_normalization import (
local_response_normalization)
from chainer.functions.pooling.average_pooling_2d import average_pooling_2d
from chainer.functions.pooling.max_pooling_2d import max_pooling_2d
from chainer.initializers import constant
from chainer.initializers import uniform
from chainer import link
from chainer.links.connection.convolution_2d import Convolution2D
from chainer.links.connection.inception import Inception
from chainer.links.connection.linear import Linear
from chainer.serializers import npz
from chainer.utils import argument
from chainer.utils import imgproc
from chainer.variable import Variable
class GoogLeNet(link.Chain):
    """A pre-trained GoogLeNet model provided by BVLC.
    When you specify the path of the pre-trained chainer model serialized as
    a ``.npz`` file in the constructor, this chain model automatically
    initializes all the parameters with it.
    This model would be useful when you want to extract a semantic feature
    vector per image, or fine-tune the model on a different dataset.
    If you want to manually convert the pre-trained caffemodel to a chainer
    model that can be specified in the constructor,
    please use ``convert_caffemodel_to_npz`` classmethod instead.
    GoogLeNet, which is also called Inception-v1, is an architecture of
    convolutional neural network proposed in 2014. This model is relatively
    lightweight and requires small memory footprint during training compared
    with modern architectures such as ResNet. Therefore, if you fine-tune your
    network based on a model pre-trained by Imagenet and need to train it with
    large batch size, GoogLeNet may be useful. On the other hand, if you just
    want an off-the-shelf classifier, we recommend you to use ResNet50 or other
    models since they are more accurate than GoogLeNet.
    The original model is provided here:
    `<https://github.com/BVLC/caffe/tree/master/models/bvlc_googlenet>`_
    Args:
        pretrained_model (str): the destination of the pre-trained
            chainer model serialized as a ``.npz`` file.
            If this argument is specified as ``auto``,
            it automatically downloads the caffemodel from the internet.
            Note that in this case the converted chainer model is stored
            on ``$CHAINER_DATASET_ROOT/pfnet/chainer/models`` directory,
            where ``$CHAINER_DATASET_ROOT`` is set as
            ``$HOME/.chainer/dataset`` unless you specify another value
            as a environment variable. The converted chainer model is
            automatically used from the second time.
            If the argument is specified as ``None``, all the parameters
            are not initialized by the pre-trained model, but the default
            initializer used in BVLC, i.e.,
            ``chainer.initializers.LeCunUniform(scale=1.0)``.
            Note that, in Caffe, when weight_filler is specified as
            "xavier" type without variance_norm parameter, the weights are
            initialized by Uniform(-s, s), where
            :math:`s = \\sqrt{\\frac{3}{fan_{in}}}` and :math:`fan_{in}` is the
            number of input units. This corresponds to LeCunUniform in Chainer
            but not GlorotUniform.
    Attributes:
        available_layers (list of str): The list of available layer names
            used by ``forward`` and ``extract`` methods.
    """
    def __init__(self, pretrained_model='auto'):
        super(GoogLeNet, self).__init__()
        if pretrained_model:
            # As a sampling process is time-consuming,
            # we employ a zero initializer for faster computation.
            kwargs = {'initialW': constant.Zero()}
        else:
            # employ default initializers used in BVLC. For more detail, see
            # https://github.com/chainer/chainer/pull/2424#discussion_r109642209
            kwargs = {'initialW': uniform.LeCunUniform(scale=1.0)}
        with self.init_scope():
            self.conv1 = Convolution2D(3, 64, 7, stride=2, pad=3, **kwargs)
            self.conv2_reduce = Convolution2D(64, 64, 1, **kwargs)
            self.conv2 = Convolution2D(64, 192, 3, stride=1, pad=1, **kwargs)
            # Inception blocks 3a-5b
            self.inc3a = Inception(192, 64, 96, 128, 16, 32, 32)
            self.inc3b = Inception(256, 128, 128, 192, 32, 96, 64)
            self.inc4a = Inception(480, 192, 96, 208, 16, 48, 64)
            self.inc4b = Inception(512, 160, 112, 224, 24, 64, 64)
            self.inc4c = Inception(512, 128, 128, 256, 24, 64, 64)
            self.inc4d = Inception(512, 112, 144, 288, 32, 64, 64)
            self.inc4e = Inception(528, 256, 160, 320, 32, 128, 128)
            self.inc5a = Inception(832, 256, 160, 320, 32, 128, 128)
            self.inc5b = Inception(832, 384, 192, 384, 48, 128, 128)
            # Main classifier head
            self.loss3_fc = Linear(1024, 1000, **kwargs)
            # Auxiliary classifier heads (branch off inc4a and inc4d)
            self.loss1_conv = Convolution2D(512, 128, 1, **kwargs)
            self.loss1_fc1 = Linear(2048, 1024, **kwargs)
            self.loss1_fc2 = Linear(1024, 1000, **kwargs)
            self.loss2_conv = Convolution2D(528, 128, 1, **kwargs)
            self.loss2_fc1 = Linear(2048, 1024, **kwargs)
            self.loss2_fc2 = Linear(1024, 1000, **kwargs)
        if pretrained_model == 'auto':
            _retrieve(
                'bvlc_googlenet.npz',
                'http://dl.caffe.berkeleyvision.org/bvlc_googlenet.caffemodel',
                self)
        elif pretrained_model:
            npz.load_npz(pretrained_model, self)
    @property
    def functions(self):
        # Ordered mapping of layer name -> list of callables applied in
        # sequence; iteration order defines the forward pass
        return collections.OrderedDict([
            ('conv1', [self.conv1, relu]),
            ('pool1', [_max_pooling_2d, _local_response_normalization]),
            ('conv2_reduce', [self.conv2_reduce, relu]),
            ('conv2', [self.conv2, relu, _local_response_normalization]),
            ('pool2', [_max_pooling_2d]),
            ('inception_3a', [self.inc3a]),
            ('inception_3b', [self.inc3b]),
            ('pool3', [_max_pooling_2d]),
            ('inception_4a', [self.inc4a]),
            ('inception_4b', [self.inc4b]),
            ('inception_4c', [self.inc4c]),
            ('inception_4d', [self.inc4d]),
            ('inception_4e', [self.inc4e]),
            ('pool4', [_max_pooling_2d]),
            ('inception_5a', [self.inc5a]),
            ('inception_5b', [self.inc5b]),
            ('pool5', [_average_pooling_2d_k7]),
            ('loss3_fc', [_dropout, self.loss3_fc]),
            ('prob', [softmax]),
            # Since usually the following outputs are not used, they are put
            # after 'prob' to be skipped for efficiency.
            ('loss1_fc2', [_average_pooling_2d_k5, self.loss1_conv, relu,
                           self.loss1_fc1, relu, self.loss1_fc2]),
            ('loss2_fc2', [_average_pooling_2d_k5, self.loss2_conv, relu,
                           self.loss2_fc1, relu, self.loss2_fc2])
        ])
    @property
    def available_layers(self):
        return list(self.functions.keys())
    @classmethod
    def convert_caffemodel_to_npz(cls, path_caffemodel, path_npz):
        """Converts a pre-trained caffemodel to a chainer model.
        Args:
            path_caffemodel (str): Path of the pre-trained caffemodel.
            path_npz (str): Path of the converted chainer model.
        """
        # As CaffeFunction uses shortcut symbols,
        # we import CaffeFunction here.
        from chainer.links.caffe.caffe_function import CaffeFunction
        caffemodel = CaffeFunction(path_caffemodel)
        chainermodel = cls(pretrained_model=None)
        _transfer_googlenet(caffemodel, chainermodel)
        npz.save_npz(path_npz, chainermodel, compression=False)
    def forward(self, x, layers=None, **kwargs):
        """forward(self, x, layers=['prob'])
        Computes all the feature maps specified by ``layers``.
        .. warning::
           ``train`` argument is not supported anymore since v2.
           Instead, use ``chainer.using_config('train', train)``.
           See :func:`chainer.using_config`.
        Args:
            x (~chainer.Variable): Input variable. It should be prepared by
                ``prepare`` function.
            layers (list of str): The list of layer names you want to extract.
        Returns:
            Dictionary of ~chainer.Variable: A directory in which
            the key contains the layer name and the value contains
            the corresponding feature map variable.
        """
        if layers is None:
            layers = ['prob']
        if kwargs:
            argument.check_unexpected_kwargs(
                kwargs, train='train argument is not supported anymore. '
                'Use chainer.using_config')
            argument.assert_kwargs_empty(kwargs)
        h = x
        activations = {}
        # The auxiliary heads branch off inception_4a/4d, so those outputs
        # are cached here to restart from them later
        inception_4a_cache = None
        inception_4d_cache = None
        target_layers = set(layers)
        for key, funcs in self.functions.items():
            # Stop early once every requested layer has been collected
            if len(target_layers) == 0:
                break
            if key == 'loss1_fc2':
                h = inception_4a_cache
            elif key == 'loss2_fc2':
                h = inception_4d_cache
            for func in funcs:
                h = func(h)
            if key in target_layers:
                activations[key] = h
                target_layers.remove(key)
            if key == 'inception_4a':
                inception_4a_cache = h
            elif key == 'inception_4d':
                inception_4d_cache = h
        return activations
    def extract(self, images, layers=None, size=(224, 224), **kwargs):
        """extract(self, images, layers=['pool5'], size=(224, 224))
        Extracts all the feature maps of given images.
        The difference of directly executing ``forward`` is that
        it directly accepts images as an input and automatically
        transforms them to a proper variable. That is,
        it is also interpreted as a shortcut method that implicitly calls
        ``prepare`` and ``forward`` functions.
        Unlike ``predict`` method, this method does not override
        ``chainer.config.train`` and ``chainer.config.enable_backprop``
        configuration. If you want to extract features without updating
        model parameters, you need to manually set configuration when
        calling this method as follows:
        .. code-block:: python
            # model is an instance of `GoogLeNet`
            with chainer.using_config('train', False):
                with chainer.using_config('enable_backprop', False):
                    feature = model.extract([image])
        .. warning::
           ``train`` and ``volatile`` arguments are not supported
           anymore since v2. Instead, users should configure
           training and volatile modes with ``train`` and
           ``enable_backprop``, respectively.
           Note that default behavior of this method is different
           between v1 and later versions. Specifically,
           the default values of ``train`` arguments in v1 were
           ``False`` and ``OFF``, while that of
           ``chainer.config.train`` are ``True``.
           Therefore, users need to explicitly switch ``train``
           to ``False`` to run the code in test mode to turn off
           coputational graph construction.
           See the `upgrade guide <https://docs.chainer.org/en/stable\
/upgrade_v2.html#training-mode-is-configured-by-a-thread-local-flag>`_.
        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
            layers (list of str): The list of layer names you want to extract.
            size (pair of ints): The resolution of resized images used as
                an input of CNN. All the given images are not resized
                if this argument is ``None``, but the resolutions of
                all the images should be the same.
        Returns:
            Dictionary of ~chainer.Variable: A directory in which
            the key contains the layer name and the value contains
            the corresponding feature map variable.
        """
        if layers is None:
            layers = ['pool5']
        if kwargs:
            argument.check_unexpected_kwargs(
                kwargs, train='train argument is not supported anymore. '
                'Use chainer.using_config',
                volatile='volatile argument is not supported anymore. '
                'Use chainer.using_config')
            argument.assert_kwargs_empty(kwargs)
        x = concat_examples([prepare(img, size=size) for img in images])
        x = Variable(self.xp.asarray(x))
        return self(x, layers=layers)
    def predict(self, images, oversample=True):
        """Computes all the probabilities of given images.
        Args:
            images (iterable of PIL.Image or numpy.ndarray): Input images.
                When you specify a color image as a :class:`numpy.ndarray`,
                make sure that color order is RGB.
            oversample (bool): If ``True``, it averages results across
                center, corners, and mirrors. Otherwise, it uses only the
                center.
        Returns:
            ~chainer.Variable: Output that contains the class probabilities
            of given images.
        """
        x = concat_examples([prepare(img, size=(256, 256)) for img in images])
        if oversample:
            x = imgproc.oversample(x, crop_dims=(224, 224))
        else:
            # Center 224x224 crop of the 256x256 image
            x = x[:, :, 16:240, 16:240]
        # Use no_backprop_mode to reduce memory consumption
        with function.no_backprop_mode(), chainer.using_config('train', False):
            x = Variable(self.xp.asarray(x))
            y = self(x, layers=['prob'])['prob']
            if oversample:
                # Average the 10 crops (center/corners x mirror) per image
                n = len(y) // 10
                y_shape = y.shape[1:]
                y = reshape(y, (n, 10) + y_shape)
                y = average(y, axis=1)
        return y
def prepare(image, size=(224, 224)):
    """Converts the given image to the numpy array for GoogLeNet.
    Note that you have to call this method before ``forward``
    because the pre-trained GoogLeNet model requires to resize the given
    image, covert the RGB to the BGR, subtract the mean,
    and permute the dimensions before calling.
    Args:
        image (PIL.Image or numpy.ndarray): Input image.
            If an input is ``numpy.ndarray``, its shape must be
            ``(height, width)``, ``(height, width, channels)``,
            or ``(channels, height, width)``, and
            the order of the channels must be RGB.
        size (pair of ints): Size of converted images.
            If ``None``, the given image is not resized.
    Returns:
        numpy.ndarray: The converted output array.
    """
    if not available:
        raise ImportError('PIL cannot be loaded. Install Pillow!\n'
                          'The actual import error is as follows:\n' +
                          str(_import_error))
    dtype = chainer.get_dtype()
    if isinstance(image, numpy.ndarray):
        if image.ndim == 3:
            if image.shape[0] == 1:
                # (1, H, W) grayscale -> (H, W) so PIL can ingest it
                image = image[0, :, :]
            elif image.shape[0] == 3:
                # (3, H, W) CHW -> (H, W, 3) HWC for PIL
                image = image.transpose((1, 2, 0))
        image = Image.fromarray(image.astype(numpy.uint8))
    image = image.convert('RGB')
    if size:
        image = image.resize(size)
    image = numpy.asarray(image, dtype=dtype)
    # RGB -> BGR, the channel order the BVLC Caffe model was trained with
    image = image[:, :, ::-1]
    image -= numpy.array([104.0, 117.0, 123.0], dtype=dtype)  # BGR
    # HWC -> CHW
    image = image.transpose((2, 0, 1))
    return image
def _transfer_inception(src, dst, names):
for name in names:
chain = getattr(dst, 'inc{}'.format(name))
src_prefix = 'inception_{}/'.format(name)
chain.conv1.W.array[:] = src[src_prefix + '1x1'].W.array
chain.conv1.b.array[:] = src[src_prefix + '1x1'].b.array
chain.proj3.W.array[:] = src[src_prefix + '3x3_reduce'].W.array
chain.proj3.b.array[:] = src[src_prefix + '3x3_reduce'].b.array
chain.conv3.W.array[:] = src[src_prefix + '3x3'].W.array
chain.conv3.b.array[:] = src[src_prefix + '3x3'].b.array
chain.proj5.W.array[:] = src[src_prefix + '5x5_reduce'].W.array
chain.proj5.b.array[:] = src[src_prefix + '5x5_reduce'].b.array
chain.conv5.W.array[:] = src[src_prefix + '5x5'].W.array
chain.conv5.b.array[:] = src[src_prefix + '5x5'].b.array
chain.projp.W.array[:] = src[src_prefix + 'pool_proj'].W.array
chain.projp.b.array[:] = src[src_prefix + 'pool_proj'].b.array
def _transfer_googlenet(src, dst):
    """Copy all BVLC GoogLeNet caffemodel parameters into the chainer model."""
    # 1 #################################################################
    dst.conv1.W.array[:] = src['conv1/7x7_s2'].W.array
    dst.conv1.b.array[:] = src['conv1/7x7_s2'].b.array
    # 2 #################################################################
    dst.conv2_reduce.W.array[:] = src['conv2/3x3_reduce'].W.array
    dst.conv2_reduce.b.array[:] = src['conv2/3x3_reduce'].b.array
    dst.conv2.W.array[:] = src['conv2/3x3'].W.array
    dst.conv2.b.array[:] = src['conv2/3x3'].b.array
    # 3, 4, 5 ###########################################################
    _transfer_inception(src, dst, ['3a', '3b',
                                   '4a', '4b', '4c', '4d', '4e',
                                   '5a', '5b'])
    # outputs ############################################################
    # Main head (loss3) and the two auxiliary heads (loss1, loss2)
    dst.loss1_conv.W.array[:] = src['loss1/conv'].W.array
    dst.loss1_conv.b.array[:] = src['loss1/conv'].b.array
    dst.loss1_fc1.W.array[:] = src['loss1/fc'].W.array
    dst.loss1_fc1.b.array[:] = src['loss1/fc'].b.array
    dst.loss1_fc2.W.array[:] = src['loss1/classifier'].W.array
    dst.loss1_fc2.b.array[:] = src['loss1/classifier'].b.array
    dst.loss2_conv.W.array[:] = src['loss2/conv'].W.array
    dst.loss2_conv.b.array[:] = src['loss2/conv'].b.array
    dst.loss2_fc1.W.array[:] = src['loss2/fc'].W.array
    dst.loss2_fc1.b.array[:] = src['loss2/fc'].b.array
    dst.loss2_fc2.W.array[:] = src['loss2/classifier'].W.array
    dst.loss2_fc2.b.array[:] = src['loss2/classifier'].b.array
    dst.loss3_fc.W.array[:] = src['loss3/classifier'].W.array
    dst.loss3_fc.b.array[:] = src['loss3/classifier'].b.array
def _max_pooling_2d(x):
    # 3x3 max pooling with stride 2, used for all GoogLeNet pool layers
    return max_pooling_2d(x, ksize=3, stride=2)
def _local_response_normalization(x):
    # LRN with the BVLC GoogLeNet parameters (n=5, k=1, alpha=1e-4/5)
    return local_response_normalization(x, n=5, k=1, alpha=1e-4 / 5)
def _average_pooling_2d_k5(x):
    # 5x5 average pooling with stride 3, feeds the auxiliary classifiers
    return average_pooling_2d(x, ksize=5, stride=3)
def _average_pooling_2d_k7(x):
    # 7x7 average pooling with stride 1, the final 'pool5' layer
    return average_pooling_2d(x, ksize=7, stride=1)
def _dropout(x):
    # Dropout ratio 0.4, as before the main classifier in the reference model
    return dropout(x, ratio=0.4)
def _make_npz(path_npz, url, model):
    """Download the caffemodel from ``url``, convert it to ``path_npz``,
    load the result into ``model``, and return ``model``."""
    path_caffemodel = download.cached_download(url)
    sys.stderr.write(
        'Now loading caffemodel (usually it may take few minutes)\n')
    sys.stderr.flush()
    GoogLeNet.convert_caffemodel_to_npz(path_caffemodel, path_npz)
    npz.load_npz(path_npz, model)
    return model
def _retrieve(name_npz, url, model):
    """Load a cached converted model, converting and caching it on first use."""
    root = download.get_dataset_directory('pfnet/chainer/models/')
    path = os.path.join(root, name_npz)
    # First callable builds the cache entry; second loads an existing one
    return download.cache_or_load_file(
        path, lambda path: _make_npz(path, url, model),
        lambda path: npz.load_npz(path, model))
|
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors,
# The HuggingFace Inc. team, and The XTREME Benchmark Authors.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for NER/POS tagging tasks."""
from __future__ import absolute_import, division, print_function
import logging
import os
from io import open
from transformers import XLMTokenizer
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for token classification."""
    def __init__(self, guid, words, labels, langs=None):
        """Constructs a InputExample.
        Args:
            guid: Unique id for the example.
            words: list. The words of the sequence.
            labels: (Optional) list. The labels for each word of the sequence. This should be
            specified for train and dev examples, but not for test examples.
            langs: (Optional) list. A language id per word, for models that
            take language embeddings (e.g. XLM).
        """
        self.guid = guid
        self.words = words
        self.labels = labels
        self.langs = langs
class InputFeatures(object):
    """A single set of features of data."""
    def __init__(self, input_ids, input_mask, segment_ids, label_ids, langs=None):
        # Token ids, attention mask, segment ids, and per-token label ids,
        # all padded to the same maximum sequence length
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        self.label_ids = label_ids
        # Per-token language ids, or None when unavailable
        self.langs = langs
def read_examples_from_file(file_path, lang, lang2id=None):
    """Read a CoNLL-style tagging file into a list of ``InputExample``s.

    Each non-blank line is "<word>\\t...\\t<label>"; blank lines and
    ``-DOCSTART-`` lines delimit sentences. Lines without a label column
    get the default label "O" (test mode).

    Args:
        file_path: Path to the data file; missing files yield ``[]``.
        lang: Language code, embedded into each example's guid.
        lang2id: Optional mapping from language code to language id;
            unknown codes fall back to the id of 'en'. When absent,
            every token gets language id 0.

    Returns:
        list of InputExample.
    """
    if not os.path.exists(file_path):
        # Fixed typo: was "[Warming]"
        logger.info("[Warning] file {} not exists".format(file_path))
        return []
    guid_index = 1
    examples = []
    if lang2id:
        lang_id = lang2id.get(lang, lang2id['en'])
    else:
        lang_id = 0
    logger.info("lang_id={}, lang={}, lang2id={}".format(lang_id, lang, lang2id))
    with open(file_path, encoding="utf-8") as f:
        words = []
        labels = []
        langs = []
        for line in f:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                # Sentence boundary: flush the accumulated sentence, if any.
                # (Was `if word:`, which is unbound until the first token
                # line and tested only the last token, not the sentence.)
                if words:
                    examples.append(InputExample(guid="{}-{}".format(lang, guid_index),
                                                 words=words,
                                                 labels=labels,
                                                 langs=langs))
                    guid_index += 1
                    words = []
                    labels = []
                    langs = []
            else:
                splits = line.split("\t")
                words.append(splits[0])
                langs.append(lang_id)
                if len(splits) > 1:
                    labels.append(splits[-1].replace("\n", ""))
                else:
                    # Examples could have no label for mode = "test"
                    labels.append("O")
        if words:
            # Flush the trailing sentence. (Was "%s-%d".format(...), which
            # produced the literal guid "%s-%d" for every final sentence.)
            examples.append(InputExample(guid="{}-{}".format(lang, guid_index),
                                         words=words,
                                         labels=labels,
                                         langs=langs))
    return examples
def convert_examples_to_features(examples,
                                 label_list,
                                 max_seq_length,
                                 tokenizer,
                                 cls_token_at_end=False,
                                 cls_token="[CLS]",
                                 cls_token_segment_id=1,
                                 sep_token="[SEP]",
                                 sep_token_extra=False,
                                 pad_on_left=False,
                                 pad_token=0,
                                 pad_token_segment_id=0,
                                 pad_token_label_id=-1,
                                 sequence_a_segment_id=0,
                                 mask_padding_with_zero=True,
                                 lang='en'):
    """ Loads a data file into a list of `InputBatch`s
        `cls_token_at_end` define the location of the CLS token:
            - False (Default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
            - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
        `cls_token_segment_id` define the segment id associated to the CLS token (0 for BERT, 2 for XLNet)
    """
    label_map = {label: i for i, label in enumerate(label_list)}
    features = []
    for (ex_index, example) in enumerate(examples):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d of %d", ex_index, len(examples))
        tokens = []
        label_ids = []
        for word, label in zip(example.words, example.labels):
            if isinstance(tokenizer, XLMTokenizer):
                word_tokens = tokenizer.tokenize(word, lang=lang)
            else:
                word_tokens = tokenizer.tokenize(word)
            if len(word) != 0 and len(word_tokens) == 0:
                # The tokenizer dropped the word entirely; fall back to UNK
                word_tokens = [tokenizer.unk_token]
            tokens.extend(word_tokens)
            # Use the real label id for the first token of the word, and padding ids for the remaining tokens
            label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        if len(tokens) > max_seq_length - special_tokens_count:
            print('truncate token', len(tokens), max_seq_length, special_tokens_count)
            tokens = tokens[:(max_seq_length - special_tokens_count)]
            label_ids = label_ids[:(max_seq_length - special_tokens_count)]
        # The convention in BERT is:
        # (a) For sequence pairs:
        #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
        # (b) For single sequences:
        #  tokens:   [CLS] the dog is hairy . [SEP]
        #  type_ids:   0   0   0   0  0     0   0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambiguously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        tokens += [sep_token]
        label_ids += [pad_token_label_id]
        if sep_token_extra:
            # roberta uses an extra separator b/w pairs of sentences
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
        segment_ids = [sequence_a_segment_id] * len(tokens)
        if cls_token_at_end:
            tokens += [cls_token]
            label_ids += [pad_token_label_id]
            segment_ids += [cls_token_segment_id]
        else:
            tokens = [cls_token] + tokens
            label_ids = [pad_token_label_id] + label_ids
            segment_ids = [cls_token_segment_id] + segment_ids
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
        # Zero-pad up to the sequence length.
        padding_length = max_seq_length - len(input_ids)
        if pad_on_left:
            input_ids = ([pad_token] * padding_length) + input_ids
            input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
            segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
            label_ids = ([pad_token_label_id] * padding_length) + label_ids
        else:
            input_ids += ([pad_token] * padding_length)
            input_mask += ([0 if mask_padding_with_zero else 1] * padding_length)
            segment_ids += ([pad_token_segment_id] * padding_length)
            label_ids += ([pad_token_label_id] * padding_length)
        if example.langs and len(example.langs) > 0:
            langs = [example.langs[0]] * max_seq_length
        else:
            # No language ids for this example; keep langs as None.
            # (Guard len() with `or []` -- example.langs may be None.)
            print('example.langs', example.langs, example.words,
                  len(example.langs or []))
            print('ex_index', ex_index, len(examples))
            langs = None
        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        # langs is legitimately None when no language ids exist; only check
        # its length when it was actually built. (The unconditional
        # `len(langs)` raised TypeError on None before.)
        assert langs is None or len(langs) == max_seq_length
        if ex_index < 5:
            logger.info("*** Example ***")
            logger.info("guid: %s", example.guid)
            logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
            logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
            logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
            logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))
            logger.info("langs: {}".format(langs))
        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_ids=label_ids,
                          langs=langs))
    return features
def get_labels(path):
    """Read label names from *path* (one per line), ensuring "O" is present.

    The outside-any-entity label "O" is prepended when the file does not
    already contain it.
    """
    with open(path, "r") as label_file:
        labels = label_file.read().splitlines()
    return labels if "O" in labels else ["O"] + labels
|
|
from __future__ import print_function, division
import numpy as np
from numpy.testing import (run_module_suite, assert_array_almost_equal_nulp,
assert_almost_equal, assert_array_equal,
assert_raises, assert_)
import warnings
from skimage.restoration import unwrap_phase
from skimage._shared._warnings import expected_warnings
def assert_phase_almost_equal(a, b, *args, **kwargs):
    """An assert_almost_equal insensitive to phase shifts of n*2*pi."""
    # Estimate the global 2*pi multiple separating the two arrays
    shift = 2 * np.pi * np.round((b.mean() - a.mean()) / (2 * np.pi))
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        print('assert_phase_allclose, abs', np.max(np.abs(a - (b - shift))))
        print('assert_phase_allclose, rel',
              np.max(np.abs((a - (b - shift)) / a)))
    if np.ma.isMaskedArray(a):
        # Masked inputs must both be masked, with identical masks
        assert_(np.ma.isMaskedArray(b))
        assert_array_equal(a.mask, b.mask)
        # Also report differences on the raw (unmasked) data
        au = np.asarray(a)
        bu = np.asarray(b)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            print('assert_phase_allclose, no mask, abs',
                  np.max(np.abs(au - (bu - shift))))
            print('assert_phase_allclose, no mask, rel',
                  np.max(np.abs((au - (bu - shift)) / au)))
    assert_array_almost_equal_nulp(a + shift, b, *args, **kwargs)
def check_unwrap(image, mask=None):
    """Wrap *image* into (-pi, pi], unwrap it, and compare to the original."""
    image_wrapped = np.angle(np.exp(1j * image))
    if mask is not None:
        print('Testing a masked image')
        image = np.ma.array(image, mask=mask)
        image_wrapped = np.ma.array(image_wrapped, mask=mask)
    # seed=0 makes the unwrapping deterministic for reproducible tests
    image_unwrapped = unwrap_phase(image_wrapped, seed=0)
    assert_phase_almost_equal(image_unwrapped, image)
def test_unwrap_1d():
    """Unwrap a 1D linear ramp; masks and wrap_around must be rejected."""
    image = np.linspace(0, 10 * np.pi, 100)
    check_unwrap(image)
    # Masked arrays are not allowed in 1D
    assert_raises(ValueError, check_unwrap, image, True)
    # wrap_around is not allowed in 1D
    assert_raises(ValueError, unwrap_phase, image, True, seed=0)
def test_unwrap_2d():
    """Unwrap a smooth 2D ramp, without and with a masked region."""
    x, y = np.ogrid[:8, :16]
    image = 2 * np.pi * (x * 0.2 + y * 0.1)
    yield check_unwrap, image
    # ``np.bool`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``bool`` is exactly the type it aliased.
    mask = np.zeros(image.shape, dtype=bool)
    mask[4:6, 4:8] = True
    yield check_unwrap, image, mask
def test_unwrap_3d():
    """Unwrap a smooth 3D ramp, without and with a masked region."""
    x, y, z = np.ogrid[:8, :12, :16]
    image = 2 * np.pi * (x * 0.2 + y * 0.1 + z * 0.05)
    yield check_unwrap, image
    # ``np.bool`` was removed in NumPy 1.24; use the builtin ``bool``.
    mask = np.zeros(image.shape, dtype=bool)
    mask[4:6, 4:6, 1:3] = True
    yield check_unwrap, image, mask
def check_wrap_around(ndim, axis):
    """Verify wrap_around joins the two ends of a ramp along *axis*.

    Builds an *ndim*-dimensional image that is a ramp along *axis* whose
    last sample equals its first, and checks that the unwrap reconciles
    the endpoints only when wrap_around is enabled for that axis.
    """
    # create a ramp, but with the last pixel along axis equalling the first
    elements = 100
    ramp = np.linspace(0, 12 * np.pi, elements)
    ramp[-1] = ramp[0]
    image = ramp.reshape(tuple([elements if n == axis else 1
                                for n in range(ndim)]))
    image_wrapped = np.angle(np.exp(1j * image))
    index_first = tuple([0] * ndim)
    index_last = tuple([-1 if n == axis else 0 for n in range(ndim)])
    # unwrap the image without wrap around
    with warnings.catch_warnings():
        # We do not want warnings about length 1 dimensions
        warnings.simplefilter("ignore")
        image_unwrap_no_wrap_around = unwrap_phase(image_wrapped, seed=0)
    print('endpoints without wrap_around:',
          image_unwrap_no_wrap_around[index_first],
          image_unwrap_no_wrap_around[index_last])
    # without wrap around, the endpoints of the image should differ
    assert_(abs(image_unwrap_no_wrap_around[index_first] -
                image_unwrap_no_wrap_around[index_last]) > np.pi)
    # unwrap the image with wrap around
    wrap_around = [n == axis for n in range(ndim)]
    with warnings.catch_warnings():
        # We do not want warnings about length 1 dimensions
        warnings.simplefilter("ignore")
        image_unwrap_wrap_around = unwrap_phase(image_wrapped, wrap_around,
                                                seed=0)
    print('endpoints with wrap_around:',
          image_unwrap_wrap_around[index_first],
          image_unwrap_wrap_around[index_last])
    # with wrap around, the endpoints of the image should be equal
    assert_almost_equal(image_unwrap_wrap_around[index_first],
                        image_unwrap_wrap_around[index_last])
def test_wrap_around():
    """Exercise wrap_around along every axis of 2D and 3D images."""
    for ndim in (2, 3):
        for ax in range(ndim):
            yield check_wrap_around, ndim, ax
def test_mask():
    """Masked entries must not influence the unwrapping of the unmasked ramp."""
    length = 100
    ramps = [np.linspace(0, 4 * np.pi, length),
             np.linspace(0, 8 * np.pi, length),
             np.linspace(0, 6 * np.pi, length)]
    image = np.vstack(ramps)
    # ``np.bool`` was removed in NumPy 1.24; the builtin ``bool`` is the
    # exact type it aliased, so behavior is unchanged on older NumPy too.
    mask_1d = np.ones((length,), dtype=bool)
    mask_1d[0] = mask_1d[-1] = False
    for i in range(len(ramps)):
        # mask all ramps but the i'th one
        mask = np.zeros(image.shape, dtype=bool)
        mask |= mask_1d.reshape(1, -1)
        mask[i, :] = False  # unmask i'th ramp
        image_wrapped = np.ma.array(np.angle(np.exp(1j * image)), mask=mask)
        image_unwrapped = unwrap_phase(image_wrapped)
        image_unwrapped -= image_unwrapped[0, 0]  # remove phase shift
        # The end of the unwrapped array should have value equal to the
        # endpoint of the unmasked ramp
        assert_array_almost_equal_nulp(image_unwrapped[:, -1], image[i, -1])
        assert_(np.ma.isMaskedArray(image_unwrapped))
        # Same tests, but forcing use of the 3D unwrapper by reshaping
        with expected_warnings(['length 1 dimension']):
            shape = (1,) + image_wrapped.shape
            image_wrapped_3d = image_wrapped.reshape(shape)
            image_unwrapped_3d = unwrap_phase(image_wrapped_3d)
        # remove phase shift
        image_unwrapped_3d -= image_unwrapped_3d[0, 0, 0]
        assert_array_almost_equal_nulp(image_unwrapped_3d[:, :, -1],
                                       image[i, -1])
def test_invalid_input():
    # 0-d and >3-d images are not supported.
    assert_raises(ValueError, unwrap_phase, np.zeros([]))
    assert_raises(ValueError, unwrap_phase, np.zeros((1, 1, 1, 1)))
    # wrap_around must match the image dimensionality ...
    assert_raises(ValueError, unwrap_phase, np.zeros((1, 1)), 3 * [False])
    # ... and must not be a string.
    assert_raises(ValueError, unwrap_phase, np.zeros((1, 1)), 'False')
def test_unwrap_3d_middle_wrap_around():
    # Segmentation fault in 3D unwrap phase with middle dimension connected
    # GitHub issue #1171
    volume = np.zeros((20, 30, 40), dtype=np.float32)
    result = unwrap_phase(volume, wrap_around=[False, True, False])
    assert_(np.all(result == 0))
def test_unwrap_2d_compressed_mask():
    # ValueError when image is masked array with a compressed mask (no masked
    # elements). GitHub issue #1346
    masked_but_empty = np.ma.zeros((10, 10))
    result = unwrap_phase(masked_but_empty)
    assert_(np.all(result == 0))
def test_unwrap_2d_all_masked():
    # Segmentation fault when image is masked array with a all elements masked
    # GitHub issue #1347
    fully_masked = np.ma.zeros((10, 10))
    fully_masked[:] = np.ma.masked
    result = unwrap_phase(fully_masked)
    assert_(np.ma.isMaskedArray(result))
    assert_(np.all(result.mask))
    # 1 unmasked element, still zero edges
    nearly_masked = np.ma.zeros((10, 10))
    nearly_masked[:] = np.ma.masked
    nearly_masked[0, 0] = 0
    result = unwrap_phase(nearly_masked)
    assert_(np.ma.isMaskedArray(result))
    assert_(np.sum(result.mask) == 99)  # all but one masked
    assert_(result[0, 0] == 0)
def test_unwrap_3d_all_masked():
    # all elements masked
    fully_masked = np.ma.zeros((10, 10, 10))
    fully_masked[:] = np.ma.masked
    result = unwrap_phase(fully_masked)
    assert_(np.ma.isMaskedArray(result))
    assert_(np.all(result.mask))
    # 1 unmasked element, still zero edges
    nearly_masked = np.ma.zeros((10, 10, 10))
    nearly_masked[:] = np.ma.masked
    nearly_masked[0, 0, 0] = 0
    result = unwrap_phase(nearly_masked)
    assert_(np.ma.isMaskedArray(result))
    assert_(np.sum(result.mask) == 999)  # all but one masked
    assert_(result[0, 0, 0] == 0)
# Run the whole module's tests via numpy's test runner when executed directly.
if __name__ == "__main__":
    run_module_suite()
|
|
## @file GenDecFile.py
#
# This file contains the logic to transfer a package object to DEC files.
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
GenDEC
'''
from Library.Parsing import GenSection
from Library.CommentGenerating import GenHeaderCommentSection
from Library.CommentGenerating import GenGenericCommentF
from Library.CommentGenerating import GenDecTailComment
from Library.CommentGenerating import _GetHelpStr
from Library.Misc import GuidStringToGuidStructureString
from Library.Misc import SaveFileOnChange
from Library.Misc import ConvertPath
from Library.DataType import TAB_SPACE_SPLIT
from Library.DataType import TAB_COMMA_SPLIT
from Library.DataType import TAB_ARCH_COMMON
from Library.DataType import TAB_DEC_DEFINES_DEC_SPECIFICATION
from Library.DataType import TAB_DEC_DEFINES_PACKAGE_NAME
from Library.DataType import TAB_DEC_DEFINES_PACKAGE_GUID
from Library.DataType import TAB_DEC_DEFINES_PACKAGE_VERSION
def GenPcd(Package, Content):
    """Append the [Pcds*] sections for *Package* to *Content* and return it.

    Each PCD is rendered as
    <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>, grouped first
    by usage type ([PcdsFeatureFlag], [PcdsFixedAtBuild], ...) and then by
    the PCD's sorted supported-architecture list.
    """
    #
    # generate [Pcd] section
    # <TokenSpcCName>.<TokenCName>|<Value>|<DatumType>|<Token>
    #
    ValidUsageDict = {}
    for Pcd in Package.GetPcdList():
        #
        # Generate generic comment
        #
        HelpTextList = Pcd.GetHelpTextList()
        HelpStr = _GetHelpStr(HelpTextList)
        CommentStr = GenGenericCommentF(HelpStr, 2)
        PcdErrList = Pcd.GetPcdErrorsList()
        if PcdErrList:
            # Only the first error record is rendered as a comment.
            CommentStr += GenPcdErrComment(PcdErrList[0])
        Statement = CommentStr
        CName = Pcd.GetCName()
        TokenSpaceGuidCName = Pcd.GetTokenSpaceGuidCName()
        DefaultValue = Pcd.GetDefaultValue()
        DatumType = Pcd.GetDatumType()
        Token = Pcd.GetToken()
        # Map the item-type keyword to the corresponding DEC section name.
        ValidUsage = Pcd.GetValidUsage()
        if ValidUsage == 'FeaturePcd':
            ValidUsage = 'PcdsFeatureFlag'
        elif ValidUsage == 'PatchPcd':
            ValidUsage = 'PcdsPatchableInModule'
        elif ValidUsage == 'FixedPcd':
            ValidUsage = 'PcdsFixedAtBuild'
        elif ValidUsage == 'Pcd':
            ValidUsage = 'PcdsDynamic'
        elif ValidUsage == 'PcdEx':
            ValidUsage = 'PcdsDynamicEx'
        if ValidUsage in ValidUsageDict:
            NewSectionDict = ValidUsageDict[ValidUsage]
        else:
            NewSectionDict = {}
            ValidUsageDict[ValidUsage] = NewSectionDict
        Statement += TokenSpaceGuidCName + '.' + CName
        Statement += '|' + DefaultValue
        Statement += '|' + DatumType
        Statement += '|' + Token
        #
        # generate tail comment
        #
        if Pcd.GetSupModuleList():
            Statement += GenDecTailComment(Pcd.GetSupModuleList())
        ArchList = Pcd.GetSupArchList()
        ArchList.sort()
        SortedArch = ' '.join(ArchList)
        if SortedArch in NewSectionDict:
            NewSectionDict[SortedArch] = \
                NewSectionDict[SortedArch] + [Statement]
        else:
            NewSectionDict[SortedArch] = [Statement]
    for ValidUsage in ValidUsageDict:
        Content += GenSection(ValidUsage, ValidUsageDict[ValidUsage])
    return Content
def _GenCNameValueSection(ItemList, SectionName, Content):
    """Render one Guids/Protocols/Ppis section and append it to *Content*.

    Each item becomes '<CName> = <guid C structure>' with its help text as a
    leading comment and its supported-module list as a tail comment, grouped
    by the item's sorted supported-architecture list.  The three sections
    previously used three byte-identical loops; this helper factors them out.
    """
    NewSectionDict = {}
    for Item in ItemList:
        #
        # Generate generic comment
        #
        HelpStr = _GetHelpStr(Item.GetHelpTextList())
        Statement = GenGenericCommentF(HelpStr, 2)
        Statement += Item.GetCName() + ' = ' + \
            GuidStringToGuidStructureString(Item.GetGuid())
        #
        # generate tail comment
        #
        if Item.GetSupModuleList():
            Statement += GenDecTailComment(Item.GetSupModuleList())
        ArchList = Item.GetSupArchList()
        ArchList.sort()
        SortedArch = ' '.join(ArchList)
        if SortedArch in NewSectionDict:
            NewSectionDict[SortedArch] = \
                NewSectionDict[SortedArch] + [Statement]
        else:
            NewSectionDict[SortedArch] = [Statement]
    return Content + GenSection(SectionName, NewSectionDict)
def GenGuidProtocolPpi(Package, Content):
    """Append the [Guids], [Protocols] and [Ppis] sections for *Package*.

    Returns the extended *Content* string.
    """
    Content = _GenCNameValueSection(Package.GetGuidList(), 'Guids', Content)
    Content = _GenCNameValueSection(Package.GetProtocolList(), 'Protocols',
                                    Content)
    Content = _GenCNameValueSection(Package.GetPpiList(), 'Ppis', Content)
    return Content
## Transfer Package Object to Dec files
#
# Transfer all contents of a standard Package Object to a Dec file
#
# @param Package: A Package
#
def PackageToDec(Package):
    """Serialize *Package* into a DEC file at its full path.

    Emits, in order: header comment, [Defines], [Includes], the GUID-like
    sections, [LibraryClasses], the [Pcds*] sections and [UserExtensions],
    then writes the file only if its content changed.  Returns the path.
    """
    #
    # Init global information for the file
    #
    ContainerFile = Package.GetFullPath()
    Content = ''
    #
    # generate header comment section
    #
    Content += GenHeaderCommentSection(Package.GetAbstract(), \
                                       Package.GetDescription(), \
                                       Package.GetCopyright(), \
                                       Package.GetLicense())
    #
    # for each section, maintain a dict, sorted arch will be its key,
    # statement list will be its data
    # { 'Arch1 Arch2 Arch3': [statement1, statement2],
    #   'Arch1' : [statement1, statement3]
    # }
    #
    #
    # generate [Defines] section
    #
    NewSectionDict = {TAB_ARCH_COMMON : []}
    SpecialItemList = []
    Statement = '%s = %s' % (TAB_DEC_DEFINES_DEC_SPECIFICATION, '0x00010017')
    SpecialItemList.append(Statement)
    # A leading '.' or '-' is not a valid start for a package name; prefix
    # an underscore in that case.
    BaseName = Package.GetBaseName()
    if BaseName.startswith('.') or BaseName.startswith('-'):
        BaseName = '_' + BaseName
    Statement = '%s = %s' % (TAB_DEC_DEFINES_PACKAGE_NAME, BaseName)
    SpecialItemList.append(Statement)
    Statement = '%s = %s' % (TAB_DEC_DEFINES_PACKAGE_VERSION, Package.GetVersion())
    SpecialItemList.append(Statement)
    Statement = '%s = %s' % (TAB_DEC_DEFINES_PACKAGE_GUID, Package.GetGuid())
    SpecialItemList.append(Statement)
    for SortedArch in NewSectionDict:
        NewSectionDict[SortedArch] = \
            NewSectionDict[SortedArch] + SpecialItemList
    Content += GenSection('Defines', NewSectionDict)
    #
    # generate [Includes] section
    #
    NewSectionDict = {}
    IncludeArchList = Package.GetIncludeArchList()
    if IncludeArchList:
        for Path, ArchList in IncludeArchList:
            Statement = Path
            ArchList.sort()
            SortedArch = ' '.join(ArchList)
            if SortedArch in NewSectionDict:
                NewSectionDict[SortedArch] = \
                    NewSectionDict[SortedArch] + [ConvertPath(Statement)]
            else:
                NewSectionDict[SortedArch] = [ConvertPath(Statement)]
    Content += GenSection('Includes', NewSectionDict)
    # [Guids] / [Protocols] / [Ppis]
    Content = GenGuidProtocolPpi(Package, Content)
    #
    # generate [LibraryClasses] section
    #
    NewSectionDict = {}
    for LibraryClass in Package.GetLibraryClassList():
        #
        # Generate generic comment
        #
        HelpTextList = LibraryClass.GetHelpTextList()
        HelpStr = _GetHelpStr(HelpTextList)
        if HelpStr:
            HelpStr = '@libraryclass ' + HelpStr
        CommentStr = GenGenericCommentF(HelpStr, 2)
        Statement = CommentStr
        Name = LibraryClass.GetLibraryClass()
        IncludeHeader = LibraryClass.GetIncludeHeader()
        Statement += Name + '|' + ConvertPath(IncludeHeader)
        #
        # generate tail comment
        #
        if LibraryClass.GetSupModuleList():
            Statement += \
                GenDecTailComment(LibraryClass.GetSupModuleList())
        ArchList = LibraryClass.GetSupArchList()
        ArchList.sort()
        SortedArch = ' '.join(ArchList)
        if SortedArch in NewSectionDict:
            NewSectionDict[SortedArch] = \
                NewSectionDict[SortedArch] + [Statement]
        else:
            NewSectionDict[SortedArch] = [Statement]
    Content += GenSection('LibraryClasses', NewSectionDict)
    # [Pcds*] sections
    Content = GenPcd(Package, Content)
    #
    # generate [UserExtensions] section
    #
    # NOTE(review): NewSectionDict is assigned but never used in this loop;
    # the section headers are assembled directly into strings below.
    NewSectionDict = {}
    for UserExtension in Package.GetUserExtensionList():
        Statement = UserExtension.GetStatement()
        if not Statement:
            continue
        SectionList = []
        SectionName = 'UserExtensions'
        UserId = UserExtension.GetUserID()
        if UserId:
            # A user id containing '.' must be quoted in the section header.
            if '.' in UserId:
                UserId = '"' + UserId + '"'
            SectionName += '.' + UserId
        if UserExtension.GetIdentifier():
            SectionName += '.' + '"' + UserExtension.GetIdentifier() + '"'
        if not UserExtension.GetSupArchList():
            SectionList.append(SectionName)
        else:
            for Arch in UserExtension.GetSupArchList():
                SectionList.append(SectionName + '.' + Arch)
        SectionName = ', '.join(SectionList)
        SectionName = ''.join(['[', SectionName, ']\n'])
        Content += '\n\n' + SectionName + Statement
    SaveFileOnChange(ContainerFile, Content, False)
    return ContainerFile
## GenPcdErrComment
#
# @param PcdErrObject: PcdErrorObject
#
# @retval CommentStr: Generated comment lines, with prefix "#"
#
def GenPcdErrComment (PcdErrObject):
    """Generate the "# @ValidRange"/"# @ValidList"/"# @Expression" comment
    for a PCD error object.

    Only one comment line is produced: as in the original, a later field
    (expression) overrides an earlier one (range/list).  Returns '' when the
    object carries none of the three fields -- the original raised
    UnboundLocalError in that case because CommentStr was never assigned.

    @param PcdErrObject:  PcdErrorObject

    @retval CommentStr:   Generated comment lines, with prefix "#"
    """
    # Initialize so an "empty" error object yields an empty comment instead
    # of an UnboundLocalError.
    CommentStr = ''
    EndOfLine = "\n"
    ValidValueRange = PcdErrObject.GetValidValueRange()
    if ValidValueRange:
        CommentStr = "# @ValidRange " + ValidValueRange + EndOfLine
    ValidValue = PcdErrObject.GetValidValue()
    if ValidValue:
        ValidValueList = \
            [Value for Value in ValidValue.split(TAB_SPACE_SPLIT) if Value]
        CommentStr = \
            "# @ValidList " + TAB_COMMA_SPLIT.join(ValidValueList) + EndOfLine
    Expression = PcdErrObject.GetExpression()
    if Expression:
        CommentStr = "# @Expression " + Expression + EndOfLine
    return CommentStr
|
|
#!/usr/bin/env python
from __future__ import print_function
import sys
from datetime import datetime
import time
import array
import struct
import os
import codecs
import dpansf
# On Python 2, use raw_input so input() returns a string instead of eval()ing.
if sys.version_info[0] < 3:
    input = raw_input
class Bye(Exception):
    """Raised to abort an include/script run and fall back to the prompt."""
    pass
def collect_screenshot(dest, ser):
    """Read a screen dump from the serial link and save it as an RGB image.

    Waits for the '!screenshot' marker, reads a width/height header, then a
    run-length-encoded BGRA pixel stream, and writes the decoded image to
    *dest*.  NOTE(review): uses the PIL 1.x module API (``import Image``,
    ``Image.fromstring``) and mixes str with serial bytes (``imd = ""``),
    so this function is Python-2-only as written.
    """
    import Image
    t0 = time.time()
    # Scan the byte stream for the screenshot start marker.
    match = b"!screenshot"
    have = b"X" * len(match)
    while have != match:
        have = (have + ser.read(1))[-len(match):]
    # Two unsigned 32-bit ints: image width and height.
    (w, h) = struct.unpack("II", ser.read(8))
    print('%dx%d image' % (w, h),)
    sys.stdout.flush()
    if 0:
        # Raw (non-RLE) path, disabled.
        imd = ser.read(4 * w * h)
        im = Image.fromstring("RGBA", (w, h), imd)
    else:
        # print [ord(c) for c in ser.read(20)]
        def getn():
            # Variable-length count: 255 bytes extend the run length.
            b = ord(ser.read(1))
            n = b
            while b == 255:
                b = ord(ser.read(1))
                n += b
            # print ' length', n
            return n
        imd = ""
        for y in range(h):
            print('line', y)
            # Each line alternates "repeat previous pixel" runs and literal
            # 4-byte pixel runs until the full row is assembled.
            prev = 4 * chr(0)
            d = ""
            while len(d) < 4 * w:
                # print ' have', len(d) / 4
                d += prev * getn()
                d += ser.read(4 * getn())
                prev = d[-4:]
            assert len(d) == 4 * w, 'corrupted screen dump stream'
            imd += d
        im = Image.fromstring("RGBA", (w, h), imd)
    # The device sends BGRA; swap to RGBA before saving.
    (b,g,r,a) = im.split()
    im = Image.merge("RGBA", (r, g, b, a))
    im.convert("RGB").save(dest)
    took = time.time() - t0
    print('took %.1fs. Wrote RGB image to %s' % (took, dest))
    # ser.write(b'k')
    # ser.flush()
class TetheredTarget:
    """Interactive tether to a SwapForth target over a serial port.

    Provides a REPL (``shell``), scripted includes (``include``), and a set
    of '#'-prefixed host-side commands (``shellcmd``).  Responses from the
    target are terminated by ASCII RS (chr(30)), as relied on throughout.
    """
    # Echo each included source line when True.
    verbose = True
    # Target cell size in bytes; subclasses may override (4 or 2 supported).
    cellsize = 4
    def __init__(self, port):
        self.open_ser(port, 115200)
        self.searchpath = ['.']
        self.log = open("log", "w")
        self.interpreting = True
    def open_ser(self, port, speed):
        """Open the serial port, exiting with a message if PySerial is absent."""
        try:
            import serial
        except:
            print("This tool needs PySerial, but it was not found")
            sys.exit(1)
        self.ser = serial.Serial(port, speed, timeout=None, rtscts=0)
    def custom(self):
        """Start a LaTeX transcript in log.tex.

        NOTE(review): the later ``texlog`` definition below overrides the
        one following this method, so these texlog calls are no-ops unless
        that override is removed.
        """
        self.tex = open("log.tex", "wt")
        self.texlog(r"\begin{framed}" + '\n')
        self.texlog(r"\begin{Verbatim}[commandchars=\\\{\}]" + '\n')
        self.verbose = True
    def texlog(self, s):
        # NOTE(review): shadowed by the second ``texlog`` definition near the
        # bottom of this class; as the class stands, this body never runs.
        self.tex.write(s.replace('\r', '\n'))
    def listen(self):
        """Debug helper: dump every byte received on the serial port."""
        print('listen')
        while 1:
            c = self.ser.read(1)
            print(repr(c))
    def command_response(self, cmd):
        """Send *cmd* and collect the response up to the RS terminator."""
        ser = self.ser
        # print
        # print 'cmd', repr(cmd)
        ser.write(cmd.encode('utf-8') + b'\r')
        r = []
        while True:
            c = ser.read(max(1, ser.inWaiting())).decode('utf-8')
            # print 'got', repr(c)
            r.append(c.replace(chr(30), ''))
            if chr(30) in c:
                # print 'full response', repr("".join(r))
                return "".join(r)
    def interactive_command(self, cmd = None):
        """Run *cmd* (if given), echoing output live; track interpret state."""
        ser = self.ser
        if cmd is not None:
            ser.write(cmd.encode('utf-8') + b'\r')
        r = []
        while True:
            if ser.inWaiting() == 0:
                sys.stdout.flush()
            c = ser.read(max(1, ser.inWaiting())).decode('utf-8')
            clean = c.replace(chr(30), '')
            sys.stdout.write(clean)
            r.append(clean)
            if chr(30) in c:
                r = "".join(r)
                self.log.write(r)
                self.texlog(r)
                # ' ok' means the target is back in interpret state.
                self.interpreting = r.endswith(' ok\r\n')
                return r
    def include(self, filename, write = sys.stdout.write):
        """Feed *filename* (searched along self.searchpath) to the target.

        Handles nested 'include' lines, '#' host commands, and aborts with
        Bye on a target error or when the file cannot be found.
        """
        for p in self.searchpath:
            try:
                incf = codecs.open(p + "/" + filename, "r", encoding = 'utf-8')
            except IOError:
                continue
            for l in incf:
                # time.sleep(.001)
                # sys.stdout.write(l)
                while l.endswith('\n') or l.endswith('\r'):
                    l = l[:-1]
                if self.verbose:
                    print(repr(l))
                if l == "#bye":
                    raise Bye
                l = l.expandtabs(4)
                rs = l.split()
                if rs and rs[0] == 'include':
                    self.include(rs[1])
                elif l.startswith('#'):
                    self.shellcmd(l)
                else:
                    r = self.command_response(l)
                    if r.startswith(' '):
                        r = r[1:]
                    if r.endswith(' ok\r\n'):
                        r = r[:-5]
                    if 'error: ' in r:
                        print('--- ERROR ---')
                        sys.stdout.write(l + '\n')
                        sys.stdout.write(r)
                        raise Bye
                    else:
                        write(r)
                        # print repr(r)
                        self.log.write(r)
            return
        print("Cannot find file %s in %r" % (filename, self.searchpath))
        raise Bye
    def serialize(self):
        """Return the target image as a list of ints (base-36 words)."""
        l = self.command_response('serialize')
        return [int(x, 36) for x in l.split()[:-1]]
    def shellcmd(self, cmd):
        """Dispatch a '#'-prefixed host command; anything else goes to the
        target via interactive_command."""
        ser = self.ser
        if cmd.startswith('#noverbose'):
            self.verbose = False
        elif cmd.startswith('#include'):
            cmd = cmd.split()
            if len(cmd) != 2:
                print('Usage: #include <source-file>')
            else:
                try:
                    self.include(cmd[1])
                except Bye:
                    pass
        elif cmd.startswith('#flash'):
            cmd = cmd.split()
            if len(cmd) != 2:
                print('Usage: #flash <dest-file>')
                ser.write(b'\r')
            else:
                print('please wait...')
                dest = cmd[1]
                d = self.serialize()
                print('Image is', self.cellsize*len(d), 'bytes')
                # '.hex' gets one word per line; anything else raw binary.
                if self.cellsize == 4:
                    if dest.endswith('.hex'):
                        open(dest, "w").write("".join(["%08x\n" % (x & 0xffffffff) for x in d]))
                    else:
                        open(dest, "wb").write(array.array("i", d).tostring())
                else:
                    if dest.endswith('.hex'):
                        open(dest, "w").write("".join(["%04x\n" % (x & 0xffff) for x in d]))
                    else:
                        open(dest, "wb").write(array.array("h", d).tostring())
        elif cmd.startswith('#setclock'):
            # Push the host's UTC time into the target's clock.
            n = datetime.utcnow()
            cmd = "decimal %d %d %d %d %d %d >time&date" % (n.second, n.minute, n.hour, n.day, n.month, n.year)
            ser.write(cmd.encode('utf-8') + b'\r')
            ser.readline()
        elif cmd.startswith('#bye'):
            sys.exit(0)
        elif cmd.startswith('#invent'):
            # Compare the target's word list against the DPANS word sets.
            def pp(s):
                return " ".join(sorted(s))
            words = sorted((self.command_response('words')).upper().split()[:-1])
            print('duplicates:', pp(set([w for w in words if words.count(w) > 1])))
            print('have CORE words: ', pp(set(dpansf.words['CORE']) & set(words)))
            print('missing CORE words: ', pp(set(dpansf.words['CORE']) - set(words)))
            print()
            print(pp(words))
            allwords = {}
            for ws in dpansf.words.values():
                allwords.update(ws)
            print('unknown: ', pp(set(words) - set(allwords)))
            print('extra:', pp(set(allwords) & (set(words) - set(dpansf.words['CORE']))))
            extra = (set(allwords) & (set(words) - set(dpansf.words['CORE'])))
            for ws in sorted(dpansf.words):
                s = set(dpansf.words[ws])
                if s & set(words):
                    missing = s - set(words)
                    if missing:
                        m = 'Providing names from the \wl{%s} word set'
                    else:
                        m = 'Providing the \wl{%s} word set'
                    print(m % dpansf.ws[ws])
            if 0:
                for w in sorted(extra):
                    ref = allwords[w]
                    part = ref[:ref.index('.')]
                    print('\href{http://forth.sourceforge.net/std/dpans/dpans%s.htm#%s}{\wordidx{%s}}' % (part, ref, w.lower()))
        elif cmd.startswith('#profile'):
            p = self.ser.profile()
            for m in (max(p), max(p) - 1, max(p) - 2):
                print("max depth", m, "at:")
                print("    " + " ".join(["%04x" % (2 * i) for i,v in enumerate(p) if (v == m)]))
        elif cmd.startswith('#time '):
            t0 = time.time()
            self.shellcmd(cmd[6:])
            t1 = time.time()
            print('Took %.6f seconds' % (t1 - t0))
        elif cmd.startswith('#measure'):
            ser = self.ser
            # measure the board's clock
            cmd = ":noname begin $21 emit 100000000 0 do loop again ; execute\r\n"
            time.time() # warmup
            ser.write(cmd.encode('utf-8') + b'\r')
            while ser.read(1).decode('utf-8') != '!':
                pass
            t0 = time.time()
            n = 0
            while True:
                ser.read(1)
                t = time.time()
                n += 1
                print("%.6f MHz" % ((2 * 100.000000 * n) / (t - t0)))
        elif cmd.startswith('#screenshot'):
            cmd = cmd.split()
            if len(cmd) != 2:
                print('Usage: #screenshot <dest-image-file>')
                ser.write(b'\r')
            else:
                dest = cmd[1]
                ser.write(b'GD.screenshot\r\n')
                collect_screenshot(dest, ser)
                ser.write(b'\r\n')
        elif cmd.startswith('#movie'):
            cmd = cmd.split()
            if len(cmd) != 2:
                print('Usage: #movie <command>')
                ser.write(b'\r')
            else:
                dest = cmd[1]
                ser.write(b'%s\r' % cmd[1])
                # NOTE(review): xrange is Python-2-only.
                for i in xrange(10000):
                    collect_screenshot("%04d.png" % i, ser)
                    ser.write(b'\r\n')
        else:
            self.texlog(r"\underline{\textbf{%s}}" % cmd)
            self.texlog('\n')
            self.interactive_command(cmd)
    def texlog(self, s):
        # NOTE(review): this definition overrides the earlier texlog above,
        # turning all LaTeX transcript logging into a no-op.
        pass
    def shell(self, autocomplete = True):
        """Interactive prompt with optional readline history/completion."""
        try:
            import readline
            import os
            histfile = os.path.join(os.path.expanduser("~"), ".swapforthhist")
            try:
                readline.read_history_file(histfile)
            except IOError:
                pass
            import atexit
            atexit.register(readline.write_history_file, histfile)
        except ImportError:
            print('[readline library not found - continuing anyway]')
            autocomplete = False
        if autocomplete:
            # Seed tab completion from the target's own word list.
            words = sorted((self.command_response('words')).split())
            print('Loaded', len(words), 'words')
            def completer(text, state):
                text = text.lower()
                candidates = [w for w in words if w.lower().startswith(text)]
                if state < len(candidates):
                    return candidates[state]
                else:
                    return None
            if readline.__doc__ and ('libedit' in readline.__doc__):
                readline.parse_and_bind("bind ^I rl_complete")
            else:
                readline.parse_and_bind("tab: complete")
            readline.set_completer(completer)
            readline.set_completer_delims(' ')
        ser = self.ser
        while True:
            try:
                # '>' while interpreting, '+' while compiling.
                if self.interpreting:
                    prompt = '>'
                else:
                    prompt = '+'
                cmd = input(prompt).strip()
                self.shellcmd(cmd)
            except KeyboardInterrupt:
                # NOTE(review): with print_function imported, this bare
                # ``print`` is a no-op expression, and ``interrupt`` is not
                # defined in this class (subclasses must provide it).
                print
                self.interrupt()
            except EOFError:
                self.texlog(r"\end{Verbatim}" + '\n')
                self.texlog(r"\end{framed}" + '\n')
                break
def main(Tethered):
    """Command-line entry point for a tethered-target class.

    Flags: -i <image>  image passed to boot()
           -h <port>   serial port (default /dev/ttyUSB0)
           -p <dir>    extra include search path
           -e <cmd>    host shell command to run
    Any other argument is included as a Forth source file.  The target is
    created lazily, on first use, and the interactive shell runs last.
    """
    port = '/dev/ttyUSB0'
    image = None
    r = None
    searchpath = []
    args = sys.argv[1:]
    while args:
        a = args.pop(0)
        if a.startswith('-i'):
            image = args.pop(0)
        elif a.startswith('-h'):
            port = args.pop(0)
        elif a.startswith('-p'):
            searchpath.append(args.pop(0))
        else:
            # First non-flag argument forces target creation.
            if not r:
                r = Tethered(port)
                r.boot(image)
                r.searchpath += searchpath
            if a.startswith('-e'):
                r.shellcmd(args.pop(0))
            else:
                try:
                    r.include(a)
                except Bye:
                    pass
    if not r:
        r = Tethered(port)
        r.boot(image)
        r.searchpath += searchpath
    r.shell()
|
|
#!/usr/bin/env python
import errno
import hashlib
import fnmatch
import os
import platform
import re
import repo
import subprocess
import sys
from lldbbuild import *
#### SETTINGS ####
def LLVM_HASH_INCLUDES_DIFFS():
    # When True, local uncommitted diffs are folded into the source-control
    # status string (and therefore into the md5 used to detect changes).
    return False
# For use with Xcode-style builds
def process_vcs(vcs):
    """Map a repo-spec VCS name ("svn" or "git") to its VCS enum value."""
    vcs_by_name = {"svn": VCS.svn, "git": VCS.git}
    return vcs_by_name[vcs]
def process_root(name):
    """Return the local checkout path for a known repository name."""
    source_paths = {
        "llvm": llvm_source_path(),
        "clang": clang_source_path(),
        "ninja": ninja_source_path()
    }
    return source_paths[name]
def process_repo(r):
    """Normalize one raw repository spec dict into the internal format."""
    name = r["name"]
    return {
        'name': name,
        'vcs': process_vcs(r["vcs"]),
        'root': process_root(name),
        'url': r["url"],
        'ref': r["ref"]
    }
def fallback_repo(name):
    """Spec for an already-present checkout: just name and root, no VCS info."""
    spec = dict.fromkeys(('vcs', 'url', 'ref'))
    spec['name'] = name
    spec['root'] = process_root(name)
    return spec
def dirs_exist(names):
    """True when every named repository root exists as a directory."""
    return all(os.path.isdir(process_root(name)) for name in names)
def XCODE_REPOSITORIES():
    """Return the repository specs used for an Xcode-style build.

    Precedence: existing checkouts on disk, then an explicit override from
    the ``repo`` module, then the spec set matching the current identifier.
    """
    names = ["llvm", "clang", "ninja"]
    if dirs_exist(names):
        return [fallback_repo(n) for n in names]
    override = repo.get_override()
    if override:
        return [process_repo(r) for r in override]
    identifier = repo.identifier()
    # Compare against None with ``is`` (PEP 8); the original used ``==``.
    if identifier is None:
        identifier = "<invalid>"  # repo.find will just use the fallback file
    # Renamed from ``set``, which shadowed the builtin.
    specs = repo.find(identifier)
    return [process_repo(r) for r in specs]
def get_c_compiler():
    """Path to the macOS SDK clang, as reported by xcrun (newline-stripped)."""
    xcrun_cmd = ['xcrun', '--sdk', 'macosx', '-find', 'clang']
    return subprocess.check_output(xcrun_cmd).rstrip()
def get_cxx_compiler():
    """Path to the macOS SDK clang++, as reported by xcrun (newline-stripped)."""
    xcrun_cmd = ['xcrun', '--sdk', 'macosx', '-find', 'clang++']
    return subprocess.check_output(xcrun_cmd).rstrip()
# CFLAGS="-isysroot $(xcrun --sdk macosx --show-sdk-path) -mmacosx-version-min=${DARWIN_DEPLOYMENT_VERSION_OSX}" \
# LDFLAGS="-mmacosx-version-min=${DARWIN_DEPLOYMENT_VERSION_OSX}" \
def get_deployment_target():
    """Return $MACOSX_DEPLOYMENT_TARGET, or None when it is unset."""
    return os.environ.get('MACOSX_DEPLOYMENT_TARGET')
def get_c_flags():
    # C flags are currently empty: both the sysroot flag and the
    # deployment-target flag are intentionally disabled (kept below as
    # commented-out code showing how to re-enable them).
    cflags = ''
    # sdk_path = subprocess.check_output([
    #     'xcrun',
    #     '--sdk', 'macosx',
    #     '--show-sdk-path']).rstrip()
    # cflags += '-isysroot {}'.format(sdk_path)
    deployment_target = get_deployment_target()
    if deployment_target:
        # cflags += ' -mmacosx-version-min={}'.format(deployment_target)
        pass
    return cflags
def get_cxx_flags():
    # C++ flags are identical to the C flags.
    return get_c_flags()
def get_common_linker_flags():
    # Linker flags shared by executable and shared-library links; currently
    # empty (the deployment-target flag is intentionally disabled below).
    linker_flags = ""
    deployment_target = get_deployment_target()
    if deployment_target:
        # if len(linker_flags) > 0:
        #     linker_flags += ' '
        # linker_flags += '-mmacosx-version-min={}'.format(deployment_target)
        pass
    return linker_flags
def get_exe_linker_flags():
    # Executable links use the common linker flags unchanged.
    return get_common_linker_flags()
def get_shared_linker_flags():
    # Shared-library links use the common linker flags unchanged.
    return get_common_linker_flags()
def CMAKE_FLAGS():
    """Per-configuration cmake cache flags, keyed by Xcode configuration name."""
    asserts_on = "-DLLVM_ENABLE_ASSERTIONS=ON"
    return {
        "Debug": [
            "-DCMAKE_BUILD_TYPE=RelWithDebInfo",
            asserts_on,
        ],
        "DebugClang": [
            "-DCMAKE_BUILD_TYPE=Debug",
            asserts_on,
        ],
        "Release": [
            "-DCMAKE_BUILD_TYPE=Release",
            asserts_on,
        ],
        "BuildAndIntegration": [
            "-DCMAKE_BUILD_TYPE=Release",
            "-DLLVM_ENABLE_ASSERTIONS=OFF",
        ],
    }
def CMAKE_ENVIRONMENT():
    """Extra environment variables for the cmake invocation (none by default)."""
    return {}
#### COLLECTING ALL ARCHIVES ####
def collect_archives_in_path(path):
    """Return the static archives in *path* worth linking against.

    Only libclang*/libLLVM*/libgtest* ``.a`` files are kept; ``libclang_rt``
    is excluded by the ``clang[^_]`` character class.
    """
    # Only use libclang and libLLVM archives, and exclude libclang_rt
    pattern = re.compile("^lib(clang[^_]|LLVM|gtest).*$")
    archives = []
    for entry in os.listdir(path):
        if entry.endswith(".a") and pattern.match(entry):
            archives.append(os.path.join(path, entry))
    return archives
def archive_list():
    """All archives from every library path, flattened into a single list."""
    return [archive
            for path in library_paths()
            for archive in collect_archives_in_path(path)]
def write_archives_txt():
    """Write the archive list, one path per line, to the archives.txt file.

    Uses a ``with`` block so the file handle is closed even if iterating
    the archive list raises (the original left it open on error).
    """
    with open(archives_txt(), 'w') as f:
        for archive in archive_list():
            f.write(archive + "\n")
#### COLLECTING REPOSITORY MD5S ####
def source_control_status(spec):
    """Status text for one repo spec, plus its diff when configured."""
    vcs_for_spec = vcs(spec)
    status = vcs_for_spec.status()
    if LLVM_HASH_INCLUDES_DIFFS():
        status += vcs_for_spec.diff()
    return status
def source_control_status_for_specs(specs):
    """Concatenated status text of every spec, in order."""
    return "".join(source_control_status(spec) for spec in specs)
def all_source_control_status():
    # Combined status text of every Xcode repository, in declaration order.
    return source_control_status_for_specs(XCODE_REPOSITORIES())
def md5(string):
    """Return the hex md5 digest of *string*.

    Accepts both byte and text input: ``hashlib`` digests only bytes, so
    text is UTF-8-encoded first.  The original passed text straight through,
    which raises TypeError on Python 3 and crashes on non-ASCII under
    Python 2's implicit ASCII encoding.
    """
    m = hashlib.md5()
    if isinstance(string, bytes):
        m.update(string)
    else:
        m.update(string.encode('utf-8'))
    return m.hexdigest()
def all_source_control_status_md5():
    # Fingerprint of the combined status; changes when any repo changes.
    return md5(all_source_control_status())
#### CHECKING OUT AND BUILDING LLVM ####
def apply_patches(spec):
    """Apply every scripts/<name>.*.diff patch to the spec's checkout root."""
    scripts_dir = os.path.join(lldb_source_path(), 'scripts')
    pattern = spec['name'] + '.*.diff'
    patches = [f for f in os.listdir(scripts_dir)
               if fnmatch.fnmatch(f, pattern)]
    for p in patches:
        run_in_directory(
            ["patch", "-p1", "-i", os.path.join(scripts_dir, p)],
            spec['root'])
def check_out_if_needed(spec):
    """Check out and patch the repository, unless its root already exists."""
    if os.path.isdir(spec['root']):
        return
    vcs(spec).check_out()
    apply_patches(spec)
def all_check_out_if_needed():
    """Check out every Xcode repository that is missing locally.

    Uses an explicit loop: ``map`` is lazy on Python 3, so the original
    ``map(check_out_if_needed, ...)`` form would silently do nothing there.
    """
    for spec in XCODE_REPOSITORIES():
        check_out_if_needed(spec)
def should_build_llvm():
    """Decide whether the LLVM/Clang build step needs to run."""
    if build_type() == BuildType.Xcode:
        # TODO use md5 sums
        return True
    # The original fell off the end here, implicitly returning None; make
    # the falsy result explicit (both are false in boolean context).
    return False
def do_symlink(source_path, link_path):
    """Point *link_path* at *source_path*, replacing an existing symlink.

    A pre-existing non-symlink at *link_path* is left untouched.
    """
    # Parenthesized print works identically on Python 2 and 3; the original
    # Python-2-only print statement is a syntax error under Python 3.
    print("Symlinking " + source_path + " to " + link_path)
    if os.path.islink(link_path):
        os.remove(link_path)
    if not os.path.exists(link_path):
        os.symlink(source_path, link_path)
def setup_source_symlink(repo):
    """Symlink the repository checkout into the LLDB source tree."""
    source_path = repo["root"]
    link_name = os.path.basename(source_path)
    do_symlink(source_path, os.path.join(lldb_source_path(), link_name))
def setup_source_symlinks():
    """Symlink every Xcode repository into the LLDB source tree.

    Explicit loop rather than ``map``: on Python 3 ``map`` is lazy, so the
    original form would not actually create any links there.
    """
    for r in XCODE_REPOSITORIES():
        setup_source_symlink(r)
def setup_build_symlink():
    """Link the expected package build path to the real one.

    We don't use the build symlinks in llvm.org Xcode-based builds, so this
    is a no-op for that build type.
    """
    if build_type() == BuildType.Xcode:
        return
    do_symlink(package_build_path(), expected_package_build_path())
def should_run_cmake(cmake_build_dir):
    """True when cmake must be (re)run for *cmake_build_dir*.

    That is the case when the build directory does not exist yet, or when
    it exists but holds no ninja build file -- e.g. because a previous
    cmake invocation failed and this build may have fixed the cause.
    """
    ninja_path = os.path.join(cmake_build_dir, "build.ninja")
    return (not os.path.exists(cmake_build_dir)
            or not os.path.exists(ninja_path))
def cmake_environment():
    """Process environment for cmake: os.environ overlaid with CMAKE_ENVIRONMENT()."""
    return join_dicts(os.environ, CMAKE_ENVIRONMENT())
def is_executable(path):
    """True when *path* is an existing regular file with execute permission."""
    if not os.path.isfile(path):
        return False
    return os.access(path, os.X_OK)
def find_executable_in_paths(program, paths_to_check):
    """Search for *program* among *paths_to_check*.

    If *program* carries a directory component it is only checked directly;
    otherwise each directory is tried in order (stripping surrounding
    quotes).  Returns the first executable match, or None.
    """
    program_dir, program_name = os.path.split(program)
    if program_dir:
        if is_executable(program):
            return program
        return None
    for path_dir in paths_to_check:
        candidate = os.path.join(path_dir.strip('"'), program)
        if is_executable(candidate):
            return candidate
    return None
def find_cmake():
    """Locate a cmake binary: $PATH first, then common install locations.

    Raises Exception with installation advice when no cmake can be found.
    """
    # First check the system PATH env var for cmake
    cmake_binary = find_executable_in_paths(
        "cmake", os.environ["PATH"].split(os.pathsep))
    if cmake_binary:
        # We found it there, use it.
        return cmake_binary
    # Check a few more common spots. Xcode launched from Finder
    # will have the default environment, and may not have
    # all the normal places present.
    extra_cmake_dirs = [
        "/usr/local/bin",
        "/opt/local/bin",
        os.path.join(os.path.expanduser("~"), "bin")
    ]
    if platform.system() == "Darwin":
        # Add locations where an official CMake.app package may be installed.
        extra_cmake_dirs.extend([
            os.path.join(
                os.path.expanduser("~"),
                "Applications",
                "CMake.app",
                "Contents",
                "bin"),
            os.path.join(
                os.sep,
                "Applications",
                "CMake.app",
                "Contents",
                "bin")])
    cmake_binary = find_executable_in_paths("cmake", extra_cmake_dirs)
    if cmake_binary:
        # We found it in one of the usual places. Use that.
        return cmake_binary
    # We couldn't find cmake. Tell the user what to do.
    raise Exception(
        "could not find cmake in PATH ({}) or in any of these locations ({}), "
        "please install cmake or add a link to it in one of those locations".format(
            os.environ["PATH"], extra_cmake_dirs))
def cmake_flags():
    """Assemble the full cmake argument list for the selected configuration."""
    flags = list(CMAKE_FLAGS()[lldb_configuration()])
    flags.extend([
        "-GNinja",
        "-DCMAKE_C_COMPILER={}".format(get_c_compiler()),
        "-DCMAKE_CXX_COMPILER={}".format(get_cxx_compiler()),
        "-DCMAKE_INSTALL_PREFIX={}".format(
            expected_package_build_path_for("llvm")),
        "-DCMAKE_C_FLAGS={}".format(get_c_flags()),
        "-DCMAKE_CXX_FLAGS={}".format(get_cxx_flags()),
        "-DCMAKE_EXE_LINKER_FLAGS={}".format(get_exe_linker_flags()),
        "-DCMAKE_SHARED_LINKER_FLAGS={}".format(get_shared_linker_flags()),
        "-DHAVE_CRASHREPORTER_INFO=1",
    ])
    deployment_target = get_deployment_target()
    if deployment_target:
        flags.append(
            "-DCMAKE_OSX_DEPLOYMENT_TARGET={}".format(deployment_target))
    return flags
def run_cmake(cmake_build_dir, ninja_binary_path):
    # Configure the LLVM build tree: locate cmake, then invoke it inside
    # cmake_build_dir with the project flags, pointing it at the given
    # ninja binary as the build driver.
    cmake_binary = find_cmake()
    print "found cmake binary: using \"{}\"".format(cmake_binary)
    command_line = [cmake_binary] + cmake_flags() + [
        "-DCMAKE_MAKE_PROGRAM={}".format(ninja_binary_path),
        llvm_source_path()]
    print "running cmake like so: ({}) in dir ({})".format(command_line, cmake_build_dir)
    # check_call raises CalledProcessError if cmake exits non-zero, which
    # aborts the overall build.
    subprocess.check_call(
        command_line,
        cwd=cmake_build_dir,
        env=cmake_environment())
def create_directories_as_needed(path):
    """Create path (including missing parents), tolerating prior existence.

    Any OSError other than "already exists" is propagated to the caller.
    """
    try:
        os.makedirs(path)
    except OSError as error:
        # An already-existing directory is fine; anything else is a real
        # failure and must surface to the caller.
        if error.errno == errno.EEXIST:
            return
        raise error
def run_cmake_if_needed(ninja_binary_path):
    # Configure the build tree only when should_run_cmake() reports the
    # existing cmake configuration is absent or stale.
    cmake_build_dir = package_build_path()
    if should_run_cmake(cmake_build_dir):
        # Create the build directory as needed
        create_directories_as_needed(cmake_build_dir)
        run_cmake(cmake_build_dir, ninja_binary_path)
def build_ninja_if_needed():
    # Return the path to a usable ninja binary, bootstrapping one from the
    # bundled ninja sources when none is already available.
    # First check if ninja is in our path. If so, there's nothing to do.
    ninja_binary_path = find_executable_in_paths(
        "ninja", os.environ["PATH"].split(os.pathsep))
    if ninja_binary_path:
        # It's on the path. cmake will find it. We're good.
        print "found ninja here: \"{}\"".format(ninja_binary_path)
        return ninja_binary_path
    # Figure out if we need to build it.
    ninja_build_dir = ninja_source_path()
    ninja_binary_path = os.path.join(ninja_build_dir, "ninja")
    if not is_executable(ninja_binary_path):
        # Build ninja
        # ninja bootstraps itself via its own configure script, producing
        # ./ninja inside the source directory.
        command_line = ["python", "configure.py", "--bootstrap"]
        print "building ninja like so: ({}) in dir ({})".format(command_line, ninja_build_dir)
        subprocess.check_call(
            command_line,
            cwd=ninja_build_dir,
            env=os.environ)
    return ninja_binary_path
def join_dicts(dict1, dict2):
    """Return a new dict combining both inputs.

    On a duplicate key the value from dict2 wins; neither input dict is
    modified.
    """
    merged = dict(dict1)
    merged.update(dict2)
    return merged
def build_llvm(ninja_binary_path):
    # Run ninja in the already-configured cmake build tree; check_call
    # raises if the build fails.
    cmake_build_dir = package_build_path()
    subprocess.check_call(
        [ninja_binary_path],
        cwd=cmake_build_dir,
        env=cmake_environment())
def build_llvm_if_needed():
    # Build LLVM when needed: bootstrap ninja, (re)configure with cmake,
    # then compile. The build symlink is refreshed unconditionally.
    if should_build_llvm():
        ninja_binary_path = build_ninja_if_needed()
        run_cmake_if_needed(ninja_binary_path)
        build_llvm(ninja_binary_path)
    setup_build_symlink()
#### MAIN LOGIC ####
if __name__ == "__main__":
    # Check out sources if missing, build LLVM if needed, record the
    # produced archives, then exit successfully.
    all_check_out_if_needed()
    build_llvm_if_needed()
    write_archives_txt()
    sys.exit(0)
|
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class WebApplicationFirewallPoliciesOperations(object):
    """WebApplicationFirewallPoliciesOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_09_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): AutoRest-generated code -- hand edits will be lost when
    # the client is regenerated; prefer fixing the generator/swagger instead.
    # Expose the generated model classes through the operations group.
    models = _models
    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    def list(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
        """Lists all of the protection policies within a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.WebApplicationFirewallPolicyListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        # Callers may extend/override the default status-code -> error mapping.
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: format the templated URL; later pages: follow the
            # service-provided next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Unpack one page: (link to next page or None, iterator of items).
            deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
    def list_all(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["_models.WebApplicationFirewallPolicyListResult"]
        """Gets all the WAF policies in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either WebApplicationFirewallPolicyListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_09_01.models.WebApplicationFirewallPolicyListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicyListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Same paging scheme as list(), but scoped to the subscription
            # (no resource group in the URL template).
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        def extract_data(pipeline_response):
            # Unpack one page: (link to next page or None, iterator of items).
            deserialized = self._deserialize('WebApplicationFirewallPolicyListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        policy_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.WebApplicationFirewallPolicy"
        """Retrieve protection policy with specified name within a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param policy_name: The name of the policy.
        :type policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: WebApplicationFirewallPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_09_01.models.WebApplicationFirewallPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
    def create_or_update(
        self,
        resource_group_name, # type: str
        policy_name, # type: str
        parameters, # type: "_models.WebApplicationFirewallPolicy"
        **kwargs # type: Any
    ):
        # type: (...) -> "_models.WebApplicationFirewallPolicy"
        """Creates or update policy with specified rule set name within a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param policy_name: The name of the policy.
        :type policy_name: str
        :param parameters: Policy to be created.
        :type parameters: ~azure.mgmt.network.v2019_09_01.models.WebApplicationFirewallPolicy
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: WebApplicationFirewallPolicy, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_09_01.models.WebApplicationFirewallPolicy
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.WebApplicationFirewallPolicy"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create_or_update.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'WebApplicationFirewallPolicy')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 200 (updated) and 201 (created) both carry the policy in the body.
        if response.status_code == 200:
            deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('WebApplicationFirewallPolicy', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
    def _delete_initial(
        self,
        resource_group_name, # type: str
        policy_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        # Issues the initial DELETE for the long-running operation; polling
        # to completion is handled by begin_delete().
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-09-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        policy_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes Policy.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param policy_name: The name of the policy.
        :type policy_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
        Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        # A continuation token means we are re-attaching to an in-flight
        # operation, so the initial DELETE request is skipped.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                policy_name=policy_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'policyName': self._serialize.url("policy_name", policy_name, 'str', max_length=128, min_length=0),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # 'final-state-via: location' tells ARM polling to follow the
        # Location header to determine the terminal state.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ApplicationGatewayWebApplicationFirewallPolicies/{policyName}'} # type: ignore
|
|
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_allclose
from scipy.optimize import newton
from scipy.special import logit
from sklearn.utils import assert_all_finite
from sklearn.utils.fixes import sp_version, parse_version
import pytest
from sklearn.ensemble._hist_gradient_boosting.loss import _LOSSES
from sklearn.ensemble._hist_gradient_boosting.common import Y_DTYPE
from sklearn.ensemble._hist_gradient_boosting.common import G_H_DTYPE
from sklearn.utils._testing import skip_if_32bit
def get_derivatives_helper(loss):
    """Return get_gradients() and get_hessians() functions for a given loss.
    """
    def get_gradients(y_true, raw_predictions):
        # Allocate output buffers; the loss fills them in place.
        grad = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        hess = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        loss.update_gradients_and_hessians(grad, hess, y_true,
                                           raw_predictions, None)
        return grad
    def get_hessians(y_true, raw_predictions):
        # Allocate output buffers; the loss fills them in place.
        grad = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        hess = np.empty_like(raw_predictions, dtype=G_H_DTYPE)
        loss.update_gradients_and_hessians(grad, hess, y_true,
                                           raw_predictions, None)
        loss_name = loss.__class__.__name__
        if loss_name == 'LeastSquares':
            # Constant hessians are not written by the loss; the value is 1
            # (not 2) because the implemented loss is half least squares.
            hess = np.full_like(raw_predictions, fill_value=1)
        elif loss_name == 'LeastAbsoluteDeviation':
            # Constant (zero) hessians are likewise not written.
            hess = np.full_like(raw_predictions, fill_value=0)
        return hess
    return get_gradients, get_hessians
@pytest.mark.parametrize('loss, x0, y_true', [
    ('least_squares', -2., 42),
    ('least_squares', 117., 1.05),
    ('least_squares', 0., 0.),
    # The argmin of binary_crossentropy for y_true=0 and y_true=1 is resp. -inf
    # and +inf due to logit, cf. "complete separation". Therefore, we use
    # 0 < y_true < 1.
    ('binary_crossentropy', 0.3, 0.1),
    ('binary_crossentropy', -12, 0.2),
    ('binary_crossentropy', 30, 0.9),
    ('poisson', 12., 1.),
    ('poisson', 0., 2.),
    ('poisson', -22., 10.),
])
@pytest.mark.skipif(sp_version == parse_version('1.2.0'),
                    reason='bug in scipy 1.2.0, see scipy issue #9608')
@skip_if_32bit
def test_derivatives(loss, x0, y_true):
    """Verify analytic gradients/hessians of a loss via Newton root finding."""
    # Check that gradients are zero when the loss is minimized on a single
    # value/sample using Halley's method with the first and second order
    # derivatives computed by the Loss instance.
    # Note that methods of Loss instances operate on arrays while the newton
    # root finder expects a scalar or a one-element array for this purpose.
    loss = _LOSSES[loss](sample_weight=None)
    y_true = np.array([y_true], dtype=Y_DTYPE)
    # (1, 1) shape: one tree/class, one sample.
    x0 = np.array([x0], dtype=Y_DTYPE).reshape(1, 1)
    get_gradients, get_hessians = get_derivatives_helper(loss)
    def func(x: np.ndarray) -> np.ndarray:
        if isinstance(loss, _LOSSES['binary_crossentropy']):
            # Subtract a constant term such that the binary cross entropy
            # has its minimum at zero, which is needed for the newton method.
            actual_min = loss.pointwise_loss(y_true, logit(y_true))
            return loss.pointwise_loss(y_true, x) - actual_min
        else:
            return loss.pointwise_loss(y_true, x)
    def fprime(x: np.ndarray) -> np.ndarray:
        return get_gradients(y_true, x)
    def fprime2(x: np.ndarray) -> np.ndarray:
        return get_hessians(y_true, x)
    # With fprime2 supplied, newton() uses Halley's method.
    optimum = newton(func, x0=x0, fprime=fprime, fprime2=fprime2,
                     maxiter=70, tol=2e-8)
    # Need to ravel arrays because assert_allclose requires matching dimensions
    y_true = y_true.ravel()
    optimum = optimum.ravel()
    # At the optimum: prediction maps back to y_true, loss is at its minimum,
    # and the analytic gradient vanishes.
    assert_allclose(loss.inverse_link_function(optimum), y_true)
    assert_allclose(func(optimum), 0, atol=1e-14)
    assert_allclose(get_gradients(y_true, optimum), 0, atol=1e-7)
@pytest.mark.parametrize('loss, n_classes, prediction_dim', [
    ('least_squares', 0, 1),
    ('least_absolute_deviation', 0, 1),
    ('binary_crossentropy', 2, 1),
    ('categorical_crossentropy', 3, 3),
    ('poisson', 0, 1),
])
@pytest.mark.skipif(Y_DTYPE != np.float64,
                    reason='Need 64 bits float precision for numerical checks')
def test_numerical_gradients(loss, n_classes, prediction_dim, seed=0):
    """Check analytic gradients/hessians against finite-difference estimates."""
    # Make sure gradients and hessians computed in the loss are correct, by
    # comparing with their approximations computed with finite central
    # differences.
    # See https://en.wikipedia.org/wiki/Finite_difference.
    rng = np.random.RandomState(seed)
    n_samples = 100
    if loss in ('least_squares', 'least_absolute_deviation'):
        y_true = rng.normal(size=n_samples).astype(Y_DTYPE)
    # BUGFIX: was `loss in ('poisson')`, which is a *substring* test on the
    # string 'poisson' (parentheses without a comma do not make a tuple). It
    # only worked by accident because 'poisson' is a substring of itself.
    elif loss in ('poisson',):
        y_true = rng.poisson(size=n_samples).astype(Y_DTYPE)
    else:
        y_true = rng.randint(0, n_classes, size=n_samples).astype(Y_DTYPE)
    raw_predictions = rng.normal(
        size=(prediction_dim, n_samples)
    ).astype(Y_DTYPE)
    loss = _LOSSES[loss](sample_weight=None)
    get_gradients, get_hessians = get_derivatives_helper(loss)
    # only take gradients and hessians of first tree / class.
    gradients = get_gradients(y_true, raw_predictions)[0, :].ravel()
    hessians = get_hessians(y_true, raw_predictions)[0, :].ravel()
    # Approximate gradients
    # For multiclass loss, we should only change the predictions of one tree
    # (here the first), hence the use of offset[0, :] += eps
    # As a softmax is computed, offsetting the whole array by a constant would
    # have no effect on the probabilities, and thus on the loss
    eps = 1e-9
    offset = np.zeros_like(raw_predictions)
    offset[0, :] = eps
    f_plus_eps = loss.pointwise_loss(y_true, raw_predictions + offset / 2)
    f_minus_eps = loss.pointwise_loss(y_true, raw_predictions - offset / 2)
    numerical_gradients = (f_plus_eps - f_minus_eps) / eps
    # Approximate hessians
    eps = 1e-4  # need big enough eps as we divide by its square
    offset[0, :] = eps
    f_plus_eps = loss.pointwise_loss(y_true, raw_predictions + offset)
    f_minus_eps = loss.pointwise_loss(y_true, raw_predictions - offset)
    f = loss.pointwise_loss(y_true, raw_predictions)
    numerical_hessians = (f_plus_eps + f_minus_eps - 2 * f) / eps**2
    assert_allclose(numerical_gradients, gradients, rtol=1e-4, atol=1e-7)
    assert_allclose(numerical_hessians, hessians, rtol=1e-4, atol=1e-7)
def test_baseline_least_squares():
    """Baseline of least squares is the target mean (identity link)."""
    rng = np.random.RandomState(0)
    loss = _LOSSES['least_squares'](sample_weight=None)
    y_train = rng.normal(size=100)
    baseline = loss.get_baseline_prediction(y_train, None, 1)
    # A scalar prediction carrying the targets' dtype.
    assert baseline.shape == tuple()
    assert baseline.dtype == y_train.dtype
    # The optimal constant prediction for squared error is the mean.
    assert_almost_equal(baseline, y_train.mean())
    # Identity link: applying the inverse link is a no-op.
    assert np.allclose(loss.inverse_link_function(baseline), baseline)
def test_baseline_least_absolute_deviation():
    """Baseline of least absolute deviation is the target median."""
    rng = np.random.RandomState(0)
    loss = _LOSSES['least_absolute_deviation'](sample_weight=None)
    y_train = rng.normal(size=100)
    baseline = loss.get_baseline_prediction(y_train, None, 1)
    # A scalar prediction carrying the targets' dtype.
    assert baseline.shape == tuple()
    assert baseline.dtype == y_train.dtype
    # Identity link: applying the inverse link is a no-op.
    assert np.allclose(loss.inverse_link_function(baseline), baseline)
    # The optimal constant prediction for absolute error is the median.
    assert baseline == pytest.approx(np.median(y_train))
def test_baseline_poisson():
    """Baseline of the Poisson loss is the log of the target mean (log link)."""
    rng = np.random.RandomState(0)
    loss = _LOSSES['poisson'](sample_weight=None)
    y_train = rng.poisson(size=100).astype(np.float64)
    # Sanity check: at least one positive sample, so log(mean) is defined.
    assert y_train.sum() > 0
    baseline = loss.get_baseline_prediction(y_train, None, 1)
    assert np.isscalar(baseline)
    assert baseline.dtype == y_train.dtype
    assert_all_finite(baseline)
    # Raw baseline prediction is log(mean(y)) because of the log link.
    assert_almost_equal(np.log(y_train.mean()), baseline)
    # All-zero targets must still produce a finite baseline (no log(0)).
    y_train.fill(0.)
    baseline = loss.get_baseline_prediction(y_train, None, 1)
    assert_all_finite(baseline)
def test_baseline_binary_crossentropy():
    """Baseline of binary cross-entropy is the logit of the positive rate."""
    rng = np.random.RandomState(0)
    loss = _LOSSES['binary_crossentropy'](sample_weight=None)
    # Degenerate targets (all 0 or all 1): the baseline must stay finite and
    # the sigmoid inverse link must map it back to the constant target.
    for constant in (0., 1.):
        y_train = np.full(shape=100, fill_value=constant)
        baseline = loss.get_baseline_prediction(y_train, None, 1)
        assert_all_finite(baseline)
        assert np.allclose(loss.inverse_link_function(baseline), constant)
    # Make sure baseline prediction is equal to link_function(p), where p
    # is the proba of the positive class. We want predict_proba() to return p,
    # and by definition
    # p = inverse_link_function(raw_prediction) = sigmoid(raw_prediction)
    # So we want raw_prediction = link_function(p) = log(p / (1 - p))
    y_train = rng.randint(0, 2, size=100).astype(np.float64)
    baseline = loss.get_baseline_prediction(y_train, None, 1)
    assert baseline.shape == tuple()  # scalar
    assert baseline.dtype == y_train.dtype
    p = y_train.mean()
    assert np.allclose(baseline, np.log(p / (1 - p)))
def test_baseline_categorical_crossentropy():
    """Baseline of categorical cross-entropy is log of per-class frequency."""
    rng = np.random.RandomState(0)
    prediction_dim = 4
    loss = _LOSSES['categorical_crossentropy'](sample_weight=None)
    # Degenerate targets (all 0 or all 1): the baseline must stay finite.
    for constant in (0., 1.):
        y_train = np.full(shape=100, fill_value=constant)
        baseline = loss.get_baseline_prediction(y_train, None, prediction_dim)
        assert baseline.dtype == y_train.dtype
        assert_all_finite(baseline)
    # inverse_link_function = softmax and link_function = log: each class's
    # baseline row must equal the log of that class's empirical frequency.
    y_train = rng.randint(0, prediction_dim + 1, size=100).astype(np.float32)
    baseline = loss.get_baseline_prediction(y_train, None, prediction_dim)
    assert baseline.shape == (prediction_dim, 1)
    for k in range(prediction_dim):
        p = (y_train == k).mean()
        assert np.allclose(baseline[k, :], np.log(p))
@pytest.mark.parametrize('loss, problem', [
    ('least_squares', 'regression'),
    ('least_absolute_deviation', 'regression'),
    ('binary_crossentropy', 'classification'),
    ('categorical_crossentropy', 'classification'),
    ('poisson', 'poisson_regression'),
])
@pytest.mark.parametrize('sample_weight', ['ones', 'random'])
def test_sample_weight_multiplies_gradients(loss, problem, sample_weight):
    """Weighted gradients/hessians equal the unweighted ones times the weights."""
    # Make sure that passing sample weights to the gradient and hessians
    # computation methods is equivalent to multiplying by the weights.
    rng = np.random.RandomState(42)
    n_samples = 1000
    if loss == 'categorical_crossentropy':
        n_classes = prediction_dim = 3
    else:
        n_classes = prediction_dim = 1
    # Draw targets matching the kind of problem the loss solves.
    if problem == 'regression':
        y_true = rng.normal(size=n_samples).astype(Y_DTYPE)
    elif problem == 'poisson_regression':
        y_true = rng.poisson(size=n_samples).astype(Y_DTYPE)
    else:
        y_true = rng.randint(0, n_classes, size=n_samples).astype(Y_DTYPE)
    # 'ones' weights should reproduce the unweighted case exactly; random
    # (possibly negative) weights exercise the general scaling property.
    if sample_weight == 'ones':
        sample_weight = np.ones(shape=n_samples, dtype=Y_DTYPE)
    else:
        sample_weight = rng.normal(size=n_samples).astype(Y_DTYPE)
    loss_ = _LOSSES[loss](sample_weight=sample_weight)
    baseline_prediction = loss_.get_baseline_prediction(
        y_true, None, prediction_dim
    )
    # Start all raw predictions at the baseline value.
    raw_predictions = np.zeros(shape=(prediction_dim, n_samples),
                               dtype=baseline_prediction.dtype)
    raw_predictions += baseline_prediction
    # Gradients/hessians without sample weights...
    gradients = np.empty(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
    hessians = np.ones(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
    loss_.update_gradients_and_hessians(gradients, hessians, y_true,
                                        raw_predictions, None)
    # ...and with sample weights, from the same raw predictions.
    gradients_sw = np.empty(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
    hessians_sw = np.ones(shape=(prediction_dim, n_samples), dtype=G_H_DTYPE)
    loss_.update_gradients_and_hessians(gradients_sw, hessians_sw, y_true,
                                        raw_predictions, sample_weight)
    assert np.allclose(gradients * sample_weight, gradients_sw)
    assert np.allclose(hessians * sample_weight, hessians_sw)
def test_init_gradient_and_hessians_sample_weight():
    # sample_weight passed at loss construction must toggle the
    # hessians_are_constant flag and, consequently, the shape of the
    # allocated hessians array.
    prediction_dim = 2
    n_samples = 5
    # Without weights, least-squares hessians are constant: a 1x1 array.
    loss = _LOSSES['least_squares'](sample_weight=None)
    _, hessians = loss.init_gradients_and_hessians(
        n_samples=n_samples, prediction_dim=prediction_dim,
        sample_weight=None)
    assert loss.hessians_are_constant
    assert hessians.shape == (1, 1)
    # With weights, hessians vary per sample: the full array is allocated.
    weights = np.ones(n_samples)
    loss = _LOSSES['least_squares'](sample_weight=weights)
    _, hessians = loss.init_gradients_and_hessians(
        n_samples=n_samples, prediction_dim=prediction_dim,
        sample_weight=weights)
    assert not loss.hessians_are_constant
    assert hessians.shape == (prediction_dim, n_samples)
|
|
# -*- coding: utf-8
# 'version': '0.3'
#
# Copyright (c) 2017, Stephen B, Hope, All rights reserved.
#
# CommAI-env Copyright (c) 2016-present, Facebook, Inc., All rights reserved.
# Round1 Copyright (c) 2017-present, GoodAI All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE_CHALLENGE file in the root directory of this source tree.
import unittest
import core.task as task
class TestEvents(unittest.TestCase):
    """Exercises the event/trigger registration machinery of core.task."""
    def testTriggers(self):
        """Each on_* decorated method registers one trigger whose event
        type matches the decorator used.
        """
        class TestTask(task.Task):
            """Task declaring one handler for every supported event."""
            def __init__(self, *args, **kwargs):
                super(TestTask, self).__init__(*args, **kwargs)
            @task.on_start()
            def start_handler(self, event):
                """Handle the Start event (no-op)."""
                pass
            @task.on_message()
            def message_handler(self, event):
                """Handle the MessageReceived event (no-op)."""
                pass
            @task.on_timeout()
            def timeout_handler(self, event):
                """Handle the Timeout event (no-op)."""
                pass
            @task.on_ended()
            def ended_handler(self, event):
                """Handle the Ended event (no-op)."""
                pass
        tt = TestTask(max_time=10)
        triggers = tt.get_triggers()
        handlers = set(map(lambda t: t.event_handler, triggers))
        self.assertEqual(4, len(triggers))
        self.assertIn(self.get_func(TestTask.start_handler), handlers)
        self.assertIn(self.get_func(TestTask.message_handler), handlers)
        self.assertIn(self.get_func(TestTask.timeout_handler), handlers)
        self.assertIn(self.get_func(TestTask.ended_handler), handlers)
        # Every trigger must carry the event type its decorator implies.
        types = dict((t.event_handler, t.type) for t in triggers)
        self.assertEqual(task.Start, types[self.get_func(TestTask.start_handler)])
        self.assertEqual(task.MessageReceived, types[self.get_func(TestTask.message_handler)])
        self.assertEqual(task.Timeout, types[self.get_func(TestTask.timeout_handler)])
        self.assertEqual(task.Ended, types[self.get_func(TestTask.ended_handler)])
    def testInheritance(self):
        """An overriding handler in a subclass replaces the base class
        handler rather than registering alongside it.
        """
        class BaseTask(task.Task):
            """Base task with a start handler meant to be overridden."""
            def __init__(self, *args, **kwargs):
                super(BaseTask, self).__init__(*args, **kwargs)
            @task.on_start()
            def start_handler(self, event):
                """Base implementation (no-op)."""
                pass
        class ConcreteTask(BaseTask):
            """Subclass overriding start_handler."""
            def __init__(self, *args, **kwargs):
                super(ConcreteTask, self).__init__(*args, **kwargs)
            @task.on_start()
            def start_handler(self, event):
                """Overridden handler (no-op)."""
                pass
        tt = ConcreteTask(max_time=10)
        triggers = tt.get_triggers()
        handlers = set(map(lambda t: t.event_handler, triggers))
        self.assertEqual(1, len(triggers))
        # The start_handler must be the one of the overriding task.
        self.assertIn(self.get_func(ConcreteTask.start_handler), handlers)
        self.assertFalse(self.get_func(BaseTask.start_handler) in handlers)
    def testDynamicHandlers(self):
        """Handlers attached at runtime via add_handler (here from inside
        the start handler) must be registered with the environment.
        """
        class TestTask(task.Task):
            """Task that registers an extra handler when it starts."""
            def __init__(self, *args, **kwargs):
                super(TestTask, self).__init__(*args, **kwargs)
            @task.on_start()
            def start_handler(self, event):
                """On start, dynamically attach an on_ended handler.
                (The *event* argument is unused.)
                """
                def end_handler(self, event):
                    """Dynamically attached handler (no-op)."""
                    pass
                # Keep a reference so the test can assert on it later.
                self.end_handler_func = end_handler
                self.add_handler(task.on_ended()(end_handler))
        triggers = []
        tt = TestTask(max_time=10)
        class EnvironmentMock():
            """Minimal environment that records registered triggers."""
            def __init__(self, triggers):
                self.triggers = triggers
            def raise_event(self, event):
                """Only the init/start event is simulated here."""
                tt.start_handler(event)
            def _register_task_trigger(self, task, trigger):
                """Record a dynamically registered trigger."""
                self.triggers.append(trigger)
        # Raise the start event; this attaches the dynamic handler.
        tt.start(EnvironmentMock(triggers))
        triggers.extend(tt.get_triggers())
        handlers = set(map(lambda t: t.event_handler, triggers))
        self.assertEqual(2, len(triggers))
        self.assertIn(self.get_func(TestTask.start_handler), handlers)
        self.assertIn(self.get_func(tt.end_handler_func), handlers)
    def get_func(self, method):
        """Return the plain function behind *method*.

        Python 2 bound/unbound methods expose it as ``im_func``; Python 3
        bound methods as ``__func__``; a Python 3 "unbound method" is
        already a plain function and is returned unchanged.
        """
        try:
            return method.im_func
        except AttributeError:  # Python 3
            try:
                return method.__func__
            except AttributeError:  # Python 3 (unbound method == func)
                return method
def main():
    """Entry point: delegate to unittest's command-line test runner."""
    unittest.main()
if __name__ == '__main__':
    main()
|
|
from collections import OrderedDict
import hashlib
import uuid
from base64 import b64encode, b64decode
from datetime import datetime, date, time
from speaklater import _LazyString
from werkzeug.http import http_date, parse_date
from flask import Markup, json
from flask._compat import iteritems, text_type
from flask.debughelpers import UnexpectedUnicodeError
from flask import current_app, flash
from flask.json import JSONEncoder
from flask.sessions import SecureCookieSession, SecureCookieSessionInterface, SessionMixin, TaggedJSONSerializer
from cla_common.constants import ELIGIBILITY_STATES
from cla_public.apps.checker.api import post_to_is_eligible_api, ApiError
from cla_public.apps.checker.constants import (
F2F_CATEGORIES,
NO,
PASSPORTED_BENEFITS,
YES,
END_SERVICE_FLASH_MESSAGE,
CONTACT_PREFERENCE,
)
from cla_public.apps.checker.means_test import MeansTest
from cla_public.apps.checker.utils import passported
from cla_public.libs.utils import override_locale, category_id_to_name
class CustomJSONEncoder(JSONEncoder):
    """JSON encoder handling date/time values (ISO 8601) and lazy
    translated strings (forced to text)."""
    def default(self, obj):
        """Serialize objects the stock encoder cannot handle; everything
        else defers to the base class (which raises TypeError)."""
        # Note: datetime subclasses date; all three listed for clarity.
        if isinstance(obj, (date, time, datetime)):
            return obj.isoformat()
        if isinstance(obj, _LazyString):
            # text_type (from flask._compat, imported above) keeps this
            # consistent with the rest of the module and Py2/Py3 safe.
            return text_type(obj)
        return super(CustomJSONEncoder, self).default(obj)
class CheckerSessionObject(dict):
    "Provides some convenience properties for inter-page logic"
    # Cached eligibility decision; None means "not computed yet".
    _eligibility = None
    def __init__(self, *args, **kwargs):
        super(CheckerSessionObject, self).__init__(*args, **kwargs)
        self._eligibility = None
        self._reasons = None
    def __setitem__(self, *args, **kwargs):
        # Any form-data write invalidates the cached eligibility result.
        super(CheckerSessionObject, self).__setitem__(*args, **kwargs)
        self._eligibility = None
        self._reasons = None
    def field(self, form_name, field_name, default=None):
        """Return a single field value from a stored form, or *default*."""
        return self.get(form_name, {}).get(field_name, default)
    # TODO: Check if redundant because scope diagnosis manages F2F redirects
    @property
    def needs_face_to_face(self):
        """True when the chosen category must be handled face to face."""
        return self.category in F2F_CATEGORIES
    @property
    def ineligible_reasons(self):
        """Reasons from the last eligibility API call (empty if none)."""
        return self._reasons or []
    @property
    def ineligible(self):
        """True when the eligibility API has answered NO."""
        return self.eligibility == ELIGIBILITY_STATES.NO
    @property
    def eligibility(self):
        """Lazily computed, cached eligibility; API errors -> UNKNOWN."""
        if self._eligibility is None:
            try:
                self._eligibility, self._reasons = post_to_is_eligible_api()
            except ApiError:
                self._eligibility = ELIGIBILITY_STATES.UNKNOWN
        return self._eligibility
    @property
    def need_more_info(self):
        """Show we need more information page instead of eligible"""
        if self.eligibility == ELIGIBILITY_STATES.UNKNOWN:
            return True
        properties = self.field("PropertiesForm", "properties")
        if properties:
            # Disputed or co-owned property means we cannot decide yet.
            return any([p["in_dispute"] == YES or p["other_shareholders"] == YES for p in properties])
        return False
    @property
    def category(self):
        """Law category id selected earlier in the flow (or None)."""
        return self.get("category")
    @property
    def category_name(self):
        """Human-readable (possibly translated) category name."""
        return category_id_to_name(self.category)
    @property
    def category_slug(self):
        # force english translation for slug
        cat_name = self.category_name
        if cat_name:
            with override_locale("en"):
                slug = cat_name.lower().replace(" ", "-")
            return slug
    def is_yes(self, form, field):
        """True when *field* on *form* was answered YES (defaults to NO)."""
        return self.field(form, field, NO) == YES
    @property
    def has_savings(self):
        return self.is_yes("AboutYouForm", "have_savings")
    @property
    def has_valuables(self):
        return self.is_yes("AboutYouForm", "have_valuables")
    @property
    def has_savings_or_valuables(self):
        return self.has_savings or self.has_valuables
    @property
    def owns_property(self):
        return self.is_yes("AboutYouForm", "own_property")
    @property
    def is_on_benefits(self):
        return self.is_yes("AboutYouForm", "on_benefits")
    @property
    def is_on_passported_benefits(self):
        """On benefits that passport the user straight through the test."""
        return self.is_on_benefits and passported(self.field("YourBenefitsForm", "benefits", []))
    @property
    def is_on_other_benefits(self):
        """On at least one non-passported benefit (child benefit excluded)."""
        benefits = set(self.field("YourBenefitsForm", "benefits", []))
        other_benefits = benefits.difference(PASSPORTED_BENEFITS).difference({"child_benefit"})
        return self.is_on_benefits and bool(other_benefits)
    @property
    def has_children(self):
        return self.is_yes("AboutYouForm", "have_children")
    @property
    def has_dependants(self):
        return self.is_yes("AboutYouForm", "have_dependants")
    @property
    def has_partner(self):
        # A partner in dispute is treated as no partner for means purposes.
        partner = self.is_yes("AboutYouForm", "have_partner")
        in_dispute = self.is_yes("AboutYouForm", "in_dispute")
        return partner and not in_dispute
    @property
    def is_employed(self):
        return self.is_yes("AboutYouForm", "is_employed")
    @property
    def is_self_employed(self):
        return self.is_yes("AboutYouForm", "is_self_employed")
    @property
    def partner_is_employed(self):
        return self.has_partner and self.is_yes("AboutYouForm", "partner_is_employed")
    @property
    def partner_is_self_employed(self):
        return self.has_partner and self.is_yes("AboutYouForm", "partner_is_self_employed")
    @property
    def aged_60_or_over(self):
        return self.is_yes("AboutYouForm", "aged_60_or_over")
    @property
    def callback_time(self):
        """Requested callback time for the active contact preference."""
        if self.contact_type == "thirdparty":
            return self.field("ContactForm", "thirdparty", {}).get("time", None)
        return self.field("ContactForm", "callback", {}).get("time", None)
    def add_note(self, key, note):
        """Append/replace a note under *key*, preserving insertion order."""
        notes = self.get("notes", OrderedDict())
        notes[key] = note
        self["notes"] = notes
    def notes_object(self):
        """Return an object exposing api_payload() with all notes joined."""
        session = self
        def format_note(note_item):
            return u"{key}:\n{note}".format(key=note_item[0], note=note_item[1])
        class Notes(object):
            def api_payload(self):
                return {"notes": u"\n\n".join(map(format_note, session.get("notes", {}).items()))}
        return Notes()
    @property
    def contact_type(self):
        """Selected contact preference ('call', 'callback', 'thirdparty'...)."""
        return self.get("ContactForm", {}).get("contact_type")
class CheckerSession(SecureCookieSession, SessionMixin):
    "Provides some convenience properties for inter-page logic"
    _key = "checker"
    _stored_key = "stored"
    # Optional datetime overriding the default cookie expiry.
    expires_override = None
    def __init__(self, *args, **kwargs):
        self.checker = CheckerSessionObject()
        self.stored = {}
        super(CheckerSession, self).__init__(*args, **kwargs)
    @property
    def checker(self):
        """The active checker form data for this session."""
        return self[self._key]
    @checker.setter
    def checker(self, value):
        # Coerce any plain mapping into a CheckerSessionObject so its
        # convenience properties are always available. Fix: store the
        # coerced object — previously the raw ``value`` was stored and
        # the freshly built ``checker`` was discarded.
        checker = CheckerSessionObject()
        checker.update(value)
        self[self._key] = checker
    @property
    def stored(self):
        """Summary data that survives after checker data is cleared."""
        return self[self._stored_key]
    @stored.setter
    def stored(self, value):
        assert isinstance(value, dict)
        self[self._stored_key] = value
    @property
    def is_current(self):
        """True when the session is not expired and has checker data."""
        return not self.get("is_expired", False) and self.checker
    def store_checker_details(self):
        """Snapshot key checker outcomes into the persistent 'stored' dict."""
        outcome = self.stored.get("outcome", "incomplete")
        self.stored = {
            "case_ref": self.checker.get("case_ref"),
            "callback_time": self.checker.callback_time,
            # A callback was requested for any contact type except 'call'.
            "callback_requested": self.checker.contact_type
            in [type[0] for type in CONTACT_PREFERENCE if type[0] != "call"],
            "contact_type": self.checker.contact_type,
            "category": self.checker.category,
            "eligibility": self.checker.eligibility,
            "outcome": outcome,
            "adaptations": [k for k, v in self.checker.get("ContactForm", {}).get("adaptations", {}).items() if v],
        }
    def store(self, values_dict):
        """Merge *values_dict* into the stored summary."""
        self.stored.update(values_dict)
    def clear_checker(self):
        """Discard checker data, flashing the end-of-service message."""
        if self.checker:
            flash(unicode(END_SERVICE_FLASH_MESSAGE))
        self.checker = CheckerSessionObject()
    def clear(self):
        """Clear the whole session (if configured) and reset checker data."""
        if current_app.config["CLEAR_SESSION"]:
            super(CheckerSession, self).clear()
        self.checker = CheckerSessionObject()
class Tag:
    """Ordered type -> serializer dispatch used when dumping session JSON.

    Each supported type is wrapped in a single-key dict whose key (e.g.
    " t", " u") identifies the original type for the matching loads hook.
    """
    def __init__(self):
        # Order matters: checkTag uses the first matching entry, so the
        # more specific types (CheckerSessionObject, MeansTest, tuple)
        # must precede the generic ones (dict, list, str).
        self.data_types = [
            (CheckerSessionObject, self.serialize_checker_session_object),
            (MeansTest, self.serialize_means_test),
            (tuple, self.serialize_tuple),
            (uuid.UUID, self.serialize_uuid),
            (bytes, self.serialize_bytes),
            ("markup", self.serialize_markup),
            (list, self.serialize_list),
            (datetime, self.serialize_datetime),
            (dict, self.serialize_dict),
            (str, self.serialize_string),
        ]
    def serialize_checker_session_object(self, value):
        return {" ch": {k: self.checkTag(v) for k, v in iteritems(value)}}
    def serialize_means_test(self, value):
        return {" mt": {k: self.checkTag(v) for k, v in iteritems(value)}}
    def serialize_tuple(self, value):
        return {" t": [self.checkTag(item) for item in value]}
    def serialize_uuid(self, value):
        return {" u": value.hex}
    def serialize_bytes(self, value):
        return {" b": b64encode(value).decode("ascii")}
    def serialize_markup(self, value):
        return {" m": text_type(value.__html__())}
    def serialize_list(self, value):
        return [self.checkTag(item) for item in value]
    def serialize_datetime(self, value):
        return {" d": http_date(value)}
    def serialize_dict(self, value):
        return {k: self.checkTag(v) for k, v in iteritems(value)}
    def serialize_string(self, value):
        try:
            return text_type(value)
        except UnicodeError:
            raise UnexpectedUnicodeError(
                u"A byte string with "
                u"non-ASCII data was passed to the session system "
                u"which can only store unicode strings. Consider "
                u"base64 encoding your string (String was %r)" % value
            )
    def checkTag(self, value):
        """Return *value* wrapped in its tag form; unknown types pass through."""
        for data_type, method in self.data_types:
            if data_type == "markup":
                # Duck-typed: anything exposing __html__ counts as markup.
                if callable(getattr(value, "__html__", None)):
                    return method(value)
            elif isinstance(value, data_type):
                return method(value)
        return value
class CheckerTaggedJSONSerializer(TaggedJSONSerializer):
    """Session serializer aware of the custom " ch"/" mt" tags."""
    def dumps(self, value):
        """Serialize *value* to compact JSON, tagging known types."""
        return json.dumps(Tag().checkTag(value), separators=(",", ":"))
    def loads(self, value):
        """Deserialize JSON, restoring tagged values to their types."""
        def object_hook(obj):
            # Tagged values are single-key dicts produced by Tag above.
            if len(obj) != 1:
                return obj
            the_key, the_value = next(iteritems(obj))
            # Simple tags map straight onto a constructor/parser.
            simple = {
                " t": tuple,
                " u": uuid.UUID,
                " b": b64decode,
                " m": Markup,
                " d": parse_date,
            }
            if the_key in simple:
                return simple[the_key](the_value)
            if the_key == " ch":
                restored = CheckerSessionObject()
                restored.update(the_value)
                return restored
            if the_key == " mt":
                restored = MeansTest()
                restored.update(the_value)
                return restored
            return obj
        return json.loads(value, object_hook=object_hook)
# Single serializer instance shared by the session interface below.
checker_session_serializer = CheckerTaggedJSONSerializer()
class CheckerSessionInterface(SecureCookieSessionInterface):
    """Session interface using SHA-256 digests and the tagged serializer."""
    digest_method = staticmethod(hashlib.sha256)
    session_class = CheckerSession
    serializer = checker_session_serializer
    # Overridden so a session can carry its own expiry (e.g. to expire
    # shortly after page close) instead of the app-wide lifetime.
    def get_expiration_time(self, app, session):
        """Return the cookie expiry, honouring any per-session override.

        Non-permanent sessions yield None (browser-session cookie).
        """
        if not session.permanent:
            return None
        if session.expires_override:
            return session.expires_override
        return datetime.utcnow() + app.permanent_session_lifetime
|
|
import base64
import json
import os
import responses
import pytest
import mapbox
import mapbox.services.uploads
# Account used by all of the mocked Mapbox API fixtures below.
username = 'rsbaumann'
# Token must be supplied via the environment; coerced to unicode because
# the services layer expects text (this is a Python 2 module).
access_token = unicode(os.environ['MAPBOX_ACCESS_TOKEN'])
# Canonical upload-status JSON served by the mocked endpoints.  Doubled
# braces survive .format() as literal JSON braces.
upload_response_body = """
{{"progress": 0,
"modified": "date.test",
"error": null,
"tileset": "{username}.test1",
"complete": false,
"owner": "{username}",
"created": "date.test",
"id": "id.test",
"name": null}}""".format(username=username)
@responses.activate
def test_get_credentials():
    """Credential fetch returns temporary S3 staging credentials."""
    query_body = """
{{"key": "_pending/{username}/key.test",
"accessKeyId": "ak.test",
"bucket": "tilestream-tilesets-production",
"url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
"secretAccessKey": "sak.test",
"sessionToken": "st.test"}}""".format(username=username)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=query_body, status=200,
        content_type='application/json')
    res = mapbox.Uploader(access_token=access_token)._get_credentials()
    # (leftover debug ``print res`` removed)
    assert res.status_code == 200
    creds = res.json()
    assert username in creds['url']
    # Every AWS credential field must be present in the response.
    for k in ['key', 'bucket', 'url', 'accessKeyId',
              'secretAccessKey', 'sessionToken']:
        assert k in creds
@responses.activate
def test_create():
    """Create accepts bare tileset names and username-prefixed ones."""
    responses.add(
        responses.POST,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=upload_response_body, status=201,
        content_type='application/json')
    expected_tileset = "{0}.test1".format(username)
    # Bare tileset name: the username prefix is added for us.
    res = mapbox.Uploader(access_token=access_token).create(
        'http://example.com/test.json', 'test1')
    assert res.status_code == 201
    assert res.json()['tileset'] == expected_tileset
    # A fully-qualified tileset name is accepted as well.
    res2 = mapbox.Uploader(access_token=access_token).create(
        'http://example.com/test.json', 'testuser.test1')
    assert res2.status_code == 201
    assert res2.json()['tileset'] == expected_tileset
@responses.activate
def test_create_name():
    """The optional display name is forwarded in the POST payload."""
    upload_response_body = """
{"progress": 0,
"modified": "date.test",
"error": null,
"tileset": "testuser.test1",
"complete": false,
"owner": "testuser",
"created": "date.test",
"id": "id.test",
"name": "testname"}"""
    def request_callback(request):
        # Assert inside the mocked endpoint that the name was sent.
        payload = json.loads(request.body)
        assert payload['name'] == "testname"
        return (201, {}, upload_response_body)
    responses.add_callback(
        responses.POST,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        callback=request_callback)
    res = mapbox.Uploader(access_token=access_token).create(
        'http://example.com/test.json', 'testuser.test1', name="testname")
    assert res.status_code == 201
    assert res.json()['name'] == "testname"
@responses.activate
def test_list():
    """Listing uploads returns the single mocked job."""
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body="[{0}]".format(upload_response_body), status=200,
        content_type='application/json')
    res = mapbox.Uploader(access_token=access_token).list()
    assert res.status_code == 200
    listed = res.json()
    assert len(listed) == 1
    assert json.loads(upload_response_body) in listed
@responses.activate
def test_status():
    """Status lookup accepts either a job dict or a bare job id."""
    job = json.loads(upload_response_body)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token),
        match_querystring=True,
        body=upload_response_body, status=200,
        content_type='application/json')
    res = mapbox.Uploader(access_token=access_token).status(job)
    assert res.status_code == 200
    res = mapbox.Uploader(access_token=access_token).status(job['id'])
    assert res.status_code == 200
    # The returned status must round-trip back to the original job.
    assert res.json() == job
@responses.activate
def test_delete():
    """Deletion accepts either a job dict or a bare job id."""
    job = json.loads(upload_response_body)
    responses.add(
        responses.DELETE,
        'https://api.mapbox.com/uploads/v1/{0}/{1}?access_token={2}'.format(username, job['id'], access_token),
        match_querystring=True,
        body=None, status=204,
        content_type='application/json')
    for target in (job, job['id']):
        res = mapbox.Uploader(access_token=access_token).delete(target)
        assert res.status_code == 204
class MockSession(object):
    """Minimal stand-in for a boto3 session.

    Records the S3 bucket/key/body it is handed so tests can assert on
    them; every fluent call returns self. (Removed a dead ``pass`` that
    followed the assignments in ``__init__``.)
    """
    def __init__(self, *args, **kwargs):
        # Populated by Object()/Bucket() before any put/upload happens.
        self.bucket = None
        self.key = None
    def resource(self, name):
        """Record the requested resource name (expected: 's3')."""
        self.resource_name = name
        return self
    def Object(self, bucket, key):
        """Mimic s3.Object(bucket, key)."""
        assert self.resource_name == 's3'
        self.bucket = bucket
        self.key = key
        return self
    def put(self, Body):
        """Mimic a simple (single-part) object upload."""
        assert self.bucket
        assert self.key
        self.body = Body
        return True
    def Bucket(self, bucket):
        """Mimic s3.Bucket(bucket)."""
        self.bucket = bucket
        return self
    def upload_file(self, filename, key):
        """Mimic a managed (multipart) file upload."""
        assert self.bucket
        self.filename = filename
        self.key = key
        return True
@responses.activate
def test_stage(monkeypatch):
    """Staging a small file lands it under the pending S3 prefix."""
    monkeypatch.setattr(mapbox.services.uploads, 'boto3_session', MockSession)
    # Mock the credentials endpoint that stage() hits first.
    query_body = """
{{"key": "_pending/{username}/key.test",
"accessKeyId": "ak.test",
"bucket": "tilestream-tilesets-production",
"url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
"secretAccessKey": "sak.test",
"sessionToken": "st.test"}}""".format(username=username)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=query_body, status=200,
        content_type='application/json')
    with open('tests/moors.json', 'r') as src:
        staged_url = mapbox.Uploader(access_token=access_token).stage(src)
    assert staged_url.startswith(
        "https://tilestream-tilesets-production.s3.amazonaws.com/_pending")
@responses.activate
def test_big_stage(tmpdir, monkeypatch):
    """Files larger than 1M are multipart uploaded."""
    monkeypatch.setattr(mapbox.services.uploads, 'boto3_session', MockSession)
    # Mock the credentials endpoint that stage() hits first.
    query_body = """
{{"key": "_pending/{username}/key.test",
"accessKeyId": "ak.test",
"bucket": "tilestream-tilesets-production",
"url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
"secretAccessKey": "sak.test",
"sessionToken": "st.test"}}""".format(username=username)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=query_body, status=200,
        content_type='application/json')
    # Build a temp file guaranteed to exceed the 1MB multipart threshold.
    bigfile = tmpdir.join('big.txt')
    bigfile.write(','.join(('num' for num in range(1000000))))
    assert bigfile.size() > 1000000
    with bigfile.open() as src:
        staged_url = mapbox.Uploader(access_token=access_token).stage(src)
    assert staged_url.startswith(
        "https://tilestream-tilesets-production.s3.amazonaws.com/_pending")
@responses.activate
def test_upload(monkeypatch):
    """Upload a file and create a tileset"""
    monkeypatch.setattr(mapbox.services.uploads, 'boto3_session', MockSession)
    # Mock the credentials endpoint, then the upload-creation endpoint.
    query_body = """
{{"key": "_pending/{username}/key.test",
"accessKeyId": "ak.test",
"bucket": "tilestream-tilesets-production",
"url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
"secretAccessKey": "sak.test",
"sessionToken": "st.test"}}""".format(username=username)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=query_body, status=200,
        content_type='application/json')
    responses.add(
        responses.POST,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=upload_response_body, status=201,
        content_type='application/json')
    with open('tests/moors.json', 'r') as src:
        res = mapbox.Uploader(access_token=access_token).upload(src, 'test1')
    assert res.status_code == 201
    assert res.json()['tileset'] == "{0}.test1".format(username)
@responses.activate
def test_upload_error(monkeypatch):
    """Upload a file and create a tileset, fails with 409"""
    monkeypatch.setattr(mapbox.services.uploads, 'boto3_session', MockSession)
    # Credentials succeed; the creation endpoint then returns a conflict.
    query_body = """
{{"key": "_pending/{username}/key.test",
"accessKeyId": "ak.test",
"bucket": "tilestream-tilesets-production",
"url": "https://tilestream-tilesets-production.s3.amazonaws.com/_pending/{username}/key.test",
"secretAccessKey": "sak.test",
"sessionToken": "st.test"}}""".format(username=username)
    responses.add(
        responses.GET,
        'https://api.mapbox.com/uploads/v1/{0}/credentials?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body=query_body, status=200,
        content_type='application/json')
    responses.add(
        responses.POST,
        'https://api.mapbox.com/uploads/v1/{0}?access_token={1}'.format(username, access_token),
        match_querystring=True,
        body="", status=409,
        content_type='application/json')
    with open('tests/moors.json', 'r') as src:
        res = mapbox.Uploader(access_token=access_token).upload(src, 'test1')
    # The 409 conflict must be surfaced to the caller, not swallowed.
    assert res.status_code == 409
def test_invalid_fileobj():
    """Passing a path string instead of a file object must raise."""
    uploader = mapbox.Uploader(access_token=access_token)
    with pytest.raises(mapbox.errors.InvalidFileError):
        uploader.upload('tests/moors.json', 'test1')
|
|
import pytest
from mock import MagicMock
from requests import Timeout
from scrapyd_api.compat import StringIO
from scrapyd_api.constants import (
ADD_VERSION_ENDPOINT,
CANCEL_ENDPOINT,
FINISHED,
PENDING
)
from scrapyd_api.wrapper import ScrapydAPI
# Fixture values shared by all of the wrapper tests below.
HOST_URL = 'http://localhost'
AUTH = ('username', 'password')
PROJECT = 'project'
VERSION = '45'
SPIDER = 'spider'
# An arbitrary hex job identifier.
JOB = 'd131dd02c5e6eec4693d9a0698aff95c'
def test_auth_gets_applied_when_client_is_not_supplied():
    """
    Auth details should get correctly passed to the client
    when no client is provided.
    """
    assert ScrapydAPI(HOST_URL, auth=AUTH).client.auth == AUTH
def test_auth_doesnt_get_applied_when_client_is_supplied():
    """
    A user-supplied client keeps its own auth configuration;
    the wrapper must not force ours onto it.
    """
    supplied_client = MagicMock()
    api = ScrapydAPI(HOST_URL, auth=AUTH, client=supplied_client)
    assert api.client.auth != AUTH
def test_build_url_with_default_endpoints():
    """
    _build_url forms correct absolute URLs from the default endpoint
    map, with or without a trailing slash on the target host.
    """
    for host in ('http://localhost', 'http://localhost/'):
        api = ScrapydAPI(host)
        assert api._build_url(ADD_VERSION_ENDPOINT) == \
            'http://localhost/addversion.json'
def test_build_url_with_custom_endpoints():
    """
    Custom endpoints override the defaults, while endpoints that were
    not overridden still resolve to their default paths.
    """
    overrides = {ADD_VERSION_ENDPOINT: '/addversion-custom.json'}
    for host in ('http://localhost', 'http://localhost/'):
        api = ScrapydAPI(host, endpoints=overrides)
        assert api._build_url(ADD_VERSION_ENDPOINT) == \
            'http://localhost/addversion-custom.json'
    # Endpoints absent from the overrides fall back to the defaults.
    assert api._build_url(CANCEL_ENDPOINT) == 'http://localhost/cancel.json'
def test_build_url_with_non_existant_endpoint_errors():
    """
    An endpoint name missing from the endpoints dictionary makes
    _build_url raise ValueError.
    """
    with pytest.raises(ValueError):
        ScrapydAPI(HOST_URL)._build_url('does-not-exist')
def test_add_version():
    """add_version posts the egg and returns the uploaded spider count."""
    client = MagicMock()
    client.post.return_value = {'spiders': 3}
    egg = StringIO('Test egg')
    rtn = ScrapydAPI(HOST_URL, client=client).add_version(
        PROJECT, VERSION, egg)
    assert rtn == 3  # number of spiders in the uploaded egg
    client.post.assert_called_with(
        'http://localhost/addversion.json',
        data={'project': PROJECT, 'version': VERSION},
        files={'egg': egg},
        timeout=None
    )
def test_cancelling_running_job():
    """Cancelling a running job reports its previous state."""
    mock_client = MagicMock()
    mock_client.post.return_value = {
        'prevstate': 'running',
    }
    api = ScrapydAPI(HOST_URL, client=mock_client)
    rtn = api.cancel(PROJECT, JOB)
    # Compare with ==, not "is": identity of equal strings is an
    # interning accident (and a SyntaxWarning on Python 3.8+).
    assert rtn == 'running'
    mock_client.post.assert_called_with(
        'http://localhost/cancel.json',
        data={
            'project': PROJECT,
            'job': JOB
        },
        timeout=None
    )
def test_cancelling_pending_job():
    """Cancelling a pending job reports its previous state."""
    mock_client = MagicMock()
    mock_client.post.return_value = {
        'prevstate': 'pending',
    }
    api = ScrapydAPI(HOST_URL, client=mock_client)
    rtn = api.cancel(PROJECT, JOB)
    # Compare with ==, not "is" (string identity is not guaranteed).
    assert rtn == 'pending'
    mock_client.post.assert_called_with(
        'http://localhost/cancel.json',
        data={
            'project': PROJECT,
            'job': JOB
        },
        timeout=None
    )
def test_cancelling_with_specific_signal():
    """A custom kill signal is forwarded in the cancel payload."""
    mock_client = MagicMock()
    mock_client.post.return_value = {
        'prevstate': 'running',
    }
    api = ScrapydAPI(HOST_URL, client=mock_client)
    rtn = api.cancel(PROJECT, JOB, signal='TERM')
    # Compare with ==, not "is" (string identity is not guaranteed).
    assert rtn == 'running'
    mock_client.post.assert_called_with(
        'http://localhost/cancel.json',
        data={
            'project': PROJECT,
            'job': JOB,
            'signal': 'TERM'
        },
        timeout=None
    )
def test_delete_project():
    """delete_project returns True on an empty (successful) response."""
    client = MagicMock()
    client.post.return_value = {}
    rtn = ScrapydAPI(HOST_URL, client=client).delete_project(PROJECT)
    assert rtn is True
    client.post.assert_called_with(
        'http://localhost/delproject.json',
        data={'project': PROJECT},
        timeout=None
    )
def test_delete_version():
    """delete_version returns True on an empty (successful) response."""
    client = MagicMock()
    client.post.return_value = {}
    rtn = ScrapydAPI(HOST_URL, client=client).delete_version(PROJECT, VERSION)
    assert rtn is True
    client.post.assert_called_with(
        'http://localhost/delversion.json',
        data={'project': PROJECT, 'version': VERSION},
        timeout=None
    )
def test_job_status():
    """job_status maps a job id to its state ('' when unknown)."""
    client = MagicMock()
    client.get.return_value = {
        'pending': [{'id': 'abc'}, {'id': 'def'}],
        'running': [],
        'finished': [{'id': 'ghi'}],
    }
    api = ScrapydAPI(HOST_URL, client=client)
    cases = (
        ('abc', PENDING),
        ('def', PENDING),
        ('ghi', FINISHED),
        ('xyz', ''),  # unknown job id yields an empty state
    )
    for job_id, expected in cases:
        assert api.job_status(PROJECT, job_id) == expected
def test_list_jobs():
    """list_jobs returns the three state buckets unchanged."""
    client = MagicMock()
    listing = {
        'pending': [{'id': 'abc'}, {'id': 'def'}],
        'running': [],
        'finished': [{'id': 'ghi'}],
    }
    client.get.return_value = listing
    rtn = ScrapydAPI(HOST_URL, client=client).list_jobs(PROJECT)
    assert len(rtn) == 3
    assert sorted(rtn.keys()) == ['finished', 'pending', 'running']
    for state in ('pending', 'running', 'finished'):
        assert rtn[state] == listing[state]
    client.get.assert_called_with(
        'http://localhost/listjobs.json',
        params={'project': PROJECT},
        timeout=None
    )
def test_list_projects():
    """list_projects unwraps the 'projects' key from the response."""
    client = MagicMock()
    client.get.return_value = {'projects': ['test', 'test2']}
    rtn = ScrapydAPI(HOST_URL, client=client).list_projects()
    assert rtn == ['test', 'test2']
    client.get.assert_called_with(
        'http://localhost/listprojects.json',
        timeout=None
    )
def test_list_spiders():
    """list_spiders unwraps the 'spiders' key for a project."""
    client = MagicMock()
    client.get.return_value = {'spiders': ['spider', 'spider2']}
    rtn = ScrapydAPI(HOST_URL, client=client).list_spiders(PROJECT)
    assert rtn == ['spider', 'spider2']
    client.get.assert_called_with(
        'http://localhost/listspiders.json',
        params={'project': PROJECT},
        timeout=None
    )
def test_list_versions():
    """list_versions unwraps the 'versions' key for a project."""
    client = MagicMock()
    client.get.return_value = {'versions': ['version', 'version2']}
    rtn = ScrapydAPI(HOST_URL, client=client).list_versions(PROJECT)
    assert rtn == ['version', 'version2']
    client.get.assert_called_with(
        'http://localhost/listversions.json',
        params={'project': PROJECT},
        timeout=None
    )
def test_schedule():
    """schedule forwards settings/extra kwargs and returns the job id."""
    client = MagicMock()
    job_id = 'ce54b67080280d1ec69821bcb6a88393'
    client.post.return_value = {'jobid': job_id}
    settings = {
        'BOT_NAME': 'Firefox',
        'DOWNLOAD_DELAY': 2
    }
    api = ScrapydAPI(HOST_URL, client=client)
    rtn = api.schedule(PROJECT, SPIDER, settings=settings,
                       extra_detail='Test')
    assert rtn == job_id
    call_args, call_kwargs = client.post.call_args
    assert call_args == ('http://localhost/schedule.json',)
    assert len(call_kwargs) == 2
    assert 'data' in call_kwargs
    data_kw = call_kwargs['data']
    assert data_kw['project'] == PROJECT
    assert data_kw['spider'] == SPIDER
    # Extra keyword arguments pass straight through to the payload.
    assert data_kw['extra_detail'] == 'Test'
    # Settings are flattened into KEY=value strings.
    assert sorted(data_kw['setting']) == ['BOT_NAME=Firefox',
                                          'DOWNLOAD_DELAY=2']
def test_request_timeout():
    """
    The client should raise an exception when the server does not respond
    in time limit.
    """
    # NOTE(review): performs a real HTTP request to httpbin.org (no
    # mocking) — fails offline; consider a responses/mock-based delay.
    api = ScrapydAPI('http://httpbin.org/delay/5', timeout=1)
    with pytest.raises(Timeout):
        api.client.get(api.target, timeout=api.timeout)
def test_daemon_status():
    """daemon_status() should return the scrapyd queue summary as a dict."""
    status = ScrapydAPI(HOST_URL).daemon_status()
    assert isinstance(status, dict)
    for key in ('finished', 'running', 'pending', 'node_name'):
        assert key in status
    assert isinstance(status['finished'], int)
|
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
from pants.base.build_environment import (get_buildroot, get_default_pants_config_file,
get_pants_cachedir, get_pants_configdir, pants_version)
from pants.option.arg_splitter import GLOBAL_SCOPE
from pants.option.custom_types import dir_option
from pants.option.optionable import Optionable
from pants.option.scope import ScopeInfo
from pants.subsystem.subsystem_client_mixin import SubsystemClientMixin
class GlobalOptionsRegistrar(SubsystemClientMixin, Optionable):
  """Registers Pants' global-scope options.

  Bootstrap options are registered first (see `register_bootstrap_options`) so that
  their values can be interpolated into config files and referenced while the
  remaining global-scope options are registered in `register_options`.
  """
  options_scope = GLOBAL_SCOPE
  options_scope_category = ScopeInfo.GLOBAL

  @classmethod
  def register_bootstrap_options(cls, register):
    """Register bootstrap options.

    "Bootstrap options" are a small set of options whose values are useful when registering other
    options. Therefore we must bootstrap them early, before other options are registered, let
    alone parsed.

    Bootstrap option values can be interpolated into the config file, and can be referenced
    programmatically in registration code, e.g., as register.bootstrap.pants_workdir.

    Note that regular code can also access these options as normal global-scope options. Their
    status as "bootstrap options" is only pertinent during option registration.

    :param register: the option-registration callable supplied by the options system.
    """
    buildroot = get_buildroot()
    default_distdir_name = 'dist'
    default_distdir = os.path.join(buildroot, default_distdir_name)
    default_rel_distdir = '/{}/'.format(default_distdir_name)

    # Although logging supports the WARN level, it's not documented and could conceivably be yanked.
    # Since pants has supported 'warn' since inception, leave the 'warn' choice as-is but explicitly
    # setup a 'WARN' logging level name that maps to 'WARNING'.
    logging.addLevelName(logging.WARNING, 'WARN')
    register('-l', '--level', choices=['debug', 'info', 'warn'], default='info', recursive=True,
             help='Set the logging level.')
    register('-q', '--quiet', type=bool, recursive=True, daemon=False,
             help='Squelches most console output. NOTE: Some tasks default to behaving quietly: '
                  'inverting this option supports making them noisier than they would be otherwise.')
    # Not really needed in bootstrap options, but putting it here means it displays right
    # after -l and -q in help output, which is conveniently contextual.
    register('--colors', type=bool, default=sys.stdout.isatty(), recursive=True, daemon=False,
             help='Set whether log messages are displayed in color.')

    # Pants code uses this only to verify that we are of the requested version. However
    # setup scripts, runner scripts, IDE plugins, etc., may grep this out of pants.ini
    # and use it to select the right version.
    # Note that to print the version of the pants instance you're running, use -v, -V or --version.
    register('--pants-version', advanced=True, default=pants_version(),
             help='Use this pants version.')

    register('--plugins', advanced=True, type=list, help='Load these plugins.')
    register('--plugin-cache-dir', advanced=True,
             default=os.path.join(get_pants_cachedir(), 'plugins'),
             help='Cache resolved plugin requirements here.')

    register('--backend-packages', advanced=True, type=list,
             default=['pants.backend.graph_info',
                      'pants.backend.python',
                      'pants.backend.jvm',
                      'pants.backend.codegen.antlr.java',
                      'pants.backend.codegen.antlr.python',
                      'pants.backend.codegen.jaxb',
                      'pants.backend.codegen.protobuf.java',
                      'pants.backend.codegen.ragel.java',
                      'pants.backend.codegen.thrift.java',
                      'pants.backend.codegen.thrift.python',
                      'pants.backend.codegen.wire.java',
                      'pants.backend.project_info'],
             help='Load backends from these packages that are already on the path. '
                  'Add contrib and custom backends to this list.')

    register('--pants-bootstrapdir', advanced=True, metavar='<dir>', default=get_pants_cachedir(),
             help='Use this dir for global cache.')
    register('--pants-configdir', advanced=True, metavar='<dir>', default=get_pants_configdir(),
             help='Use this dir for global config files.')
    register('--pants-workdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, '.pants.d'),
             help='Write intermediate output files to this dir.')
    register('--pants-supportdir', advanced=True, metavar='<dir>',
             default=os.path.join(buildroot, 'build-support'),
             help='Use support files from this dir.')
    register('--pants-distdir', advanced=True, metavar='<dir>',
             default=default_distdir,
             help='Write end-product artifacts to this dir. If you modify this path, you '
                  'should also update --build-ignore and --pants-ignore to include the '
                  'custom dist dir path as well.')
    register('--pants-subprocessdir', advanced=True, default=os.path.join(buildroot, '.pids'),
             help='The directory to use for tracking subprocess metadata, if any. This should '
                  'live outside of the dir used by `--pants-workdir` to allow for tracking '
                  'subprocesses that outlive the workdir data (e.g. `./pants server`).')
    register('--pants-config-files', advanced=True, type=list, daemon=False,
             default=[get_default_pants_config_file()], help='Paths to Pants config files.')
    # TODO: Deprecate the --pantsrc/--pantsrc-files options?  This would require being able
    # to set extra config file locations in an initial bootstrap config file.
    register('--pantsrc', advanced=True, type=bool, default=True,
             help='Use pantsrc files.')
    register('--pantsrc-files', advanced=True, type=list, metavar='<path>', daemon=False,
             default=['/etc/pantsrc', '~/.pants.rc'],
             help='Override config with values from these files. '
                  'Later files override earlier ones.')
    register('--pythonpath', advanced=True, type=list,
             help='Add these directories to PYTHONPATH to search for plugins.')
    register('--target-spec-file', type=list, dest='target_spec_files', daemon=False,
             help='Read additional specs from this file, one per line')
    register('--verify-config', type=bool, default=True, daemon=False,
             help='Verify that all config file values correspond to known options.')
    register('--build-ignore', advanced=True, type=list, fromfile=True,
             default=['.*/', default_rel_distdir, 'bower_components/',
                      'node_modules/', '*.egg-info/'],
             help='Paths to ignore when identifying BUILD files. '
                  'This does not affect any other filesystem operations. '
                  'Patterns use the gitignore pattern syntax (https://git-scm.com/docs/gitignore).')
    register('--pants-ignore', advanced=True, type=list, fromfile=True,
             default=['.*/', default_rel_distdir],
             help='Paths to ignore for all filesystem operations performed by pants '
                  '(e.g. BUILD file scanning, glob matching, etc). '
                  'Patterns use the gitignore syntax (https://git-scm.com/docs/gitignore).')
    register('--exclude-target-regexp', advanced=True, type=list, default=[], daemon=False,
             metavar='<regexp>', help='Exclude target roots that match these regexes.')
    register('--subproject-roots', type=list, advanced=True, fromfile=True, default=[],
             help='Paths that correspond with build roots for any subproject that this '
                  'project depends on.')

    # These logging options are registered in the bootstrap phase so that plugins can log during
    # registration and not so that their values can be interpolated in configs.
    register('-d', '--logdir', advanced=True, metavar='<dir>',
             help='Write logs to files under this directory.')

    # This facilitates bootstrap-time configuration of pantsd usage such that we can
    # determine whether or not to use the Pailgun client to invoke a given pants run
    # without resorting to heavier options parsing.
    register('--enable-pantsd', advanced=True, type=bool, default=False,
             help='Enables use of the pants daemon (and implicitly, the v2 engine). (Beta)')

    # These facilitate configuring the native engine.
    register('--native-engine-visualize-to', advanced=True, default=None, type=dir_option, daemon=False,
             help='A directory to write execution and rule graphs to as `dot` files. The contents '
                  'of the directory will be overwritten if any filenames collide.')

    # BinaryUtil options.
    register('--binaries-baseurls', type=list, advanced=True,
             default=['https://binaries.pantsbuild.org'],
             help='List of URLs from which binary tools are downloaded. URLs are '
                  'searched in order until the requested path is found.')
    # Fixed: help previously referenced a nonexistent flag `--baseurls`.
    register('--binaries-fetch-timeout-secs', type=int, default=30, advanced=True, daemon=False,
             help='Timeout in seconds for URL reads when fetching binary tools from the '
                  'repos specified by --binaries-baseurls.')
    # Fixed: example dict in the help had a stray `)` after ["mac", "10.11"].
    register('--binaries-path-by-id', type=dict, advanced=True,
             help=('Maps output of uname for a machine to a binary search path. e.g. '
                   '{("darwin", "15"): ["mac", "10.11"], ("linux", "arm32"): ["linux"'
                   ', "arm32"]}'))

    # Pants Daemon options.
    register('--pantsd-pailgun-host', advanced=True, default='127.0.0.1',
             help='The host to bind the pants nailgun server to.')
    register('--pantsd-pailgun-port', advanced=True, type=int, default=0,
             help='The port to bind the pants nailgun server to. Defaults to a random port.')
    register('--pantsd-log-dir', advanced=True, default=None,
             help='The directory to log pantsd output to.')
    register('--pantsd-fs-event-workers', advanced=True, type=int, default=4,
             help='The number of workers to use for the filesystem event service executor pool.')
    register('--pantsd-invalidation-globs', advanced=True, type=list, fromfile=True, default=[],
             help='Filesystem events matching any of these globs will trigger a daemon restart.')

    # Watchman options.
    register('--watchman-version', advanced=True, default='4.9.0-pants1', help='Watchman version.')
    # Fixed: help previously referenced a nonexistent flag `--binary-util-baseurls`.
    register('--watchman-supportdir', advanced=True, default='bin/watchman',
             help='Find watchman binaries under this dir. Used as part of the path to lookup '
                  'the binary with --binaries-baseurls and --pants-bootstrapdir.')
    register('--watchman-startup-timeout', type=float, advanced=True, default=30.0,
             help='The watchman socket timeout (in seconds) for the initial `watch-project` command. '
                  'This may need to be set higher for larger repos due to watchman startup cost.')
    register('--watchman-socket-timeout', type=float, advanced=True, default=5.0,
             help='The watchman client socket timeout in seconds.')
    register('--watchman-socket-path', type=str, advanced=True, default=None,
             help='The path to the watchman UNIX socket. This can be overridden if the default '
                  'absolute path length exceeds the maximum allowed by the OS.')

    # This option changes the parser behavior in a fundamental way (which currently invalidates
    # all caches), and needs to be parsed out early, so we make it a bootstrap option.
    register('--build-file-imports', choices=['allow', 'warn', 'error'], default='warn',
             help='Whether to allow import statements in BUILD files')

  @classmethod
  def register_options(cls, register):
    """Register options not tied to any particular task or subsystem.

    :param register: the option-registration callable supplied by the options system.
    """
    # The bootstrap options need to be registered on the post-bootstrap Options instance, so it
    # won't choke on them on the command line, and also so we can access their values as regular
    # global-scope options, for convenience.
    cls.register_bootstrap_options(register)

    register('-x', '--time', type=bool,
             help='Output a timing report at the end of the run.')
    register('-e', '--explain', type=bool,
             help='Explain the execution of goals.')
    register('--tag', type=list, metavar='[+-]tag1,tag2,...',
             help="Include only targets with these tags (optional '+' prefix) or without these "
                  "tags ('-' prefix).  Useful with ::, to find subsets of targets "
                  "(e.g., integration tests.)")
    register('-t', '--timeout', advanced=True, type=int, metavar='<seconds>',
             help='Number of seconds to wait for http connections.')
    # TODO: After moving to the new options system these abstraction leaks can go away.
    register('-k', '--kill-nailguns', advanced=True, type=bool,
             help='Kill nailguns before exiting')
    register('--fail-fast', advanced=True, type=bool, recursive=True,
             help='Exit as quickly as possible on error, rather than attempting to continue '
                  'to process the non-erroneous subset of the input.')
    register('--cache-key-gen-version', advanced=True, default='200', recursive=True,
             help='The cache key generation. Bump this to invalidate every artifact for a scope.')
    register('--workdir-max-build-entries', advanced=True, type=int, default=8,
             help='Maximum number of previous builds to keep per task target pair in workdir. '
                  'If set, minimum 2 will always be kept to support incremental compilation.')
    register('--max-subprocess-args', advanced=True, type=int, default=100, recursive=True,
             help='Used to limit the number of arguments passed to some subprocesses by breaking '
                  'the command up into multiple invocations.')
    register('--print-exception-stacktrace', advanced=True, type=bool,
             help='Print to console the full exception stack trace if encountered.')
    register('--lock', advanced=True, type=bool, default=True,
             help='Use a global lock to exclude other versions of pants from running during '
                  'critical operations.')
|
|
#!/usr/bin/python
"""
PlotContours.py
Plot individual contour plots of discrimination significance
for a given ensemble and DM mass...
BJK 30/06/2017
"""
import numpy as np
from numpy import pi
from scipy.integrate import quad
from scipy.interpolate import interp1d, interp2d
from scipy import ndimage
from matplotlib.ticker import MultipleLocator
import os.path
import sys
import CalcParamPoint as CPP
#------ Matplotlib parameters ------
# Global figure styling: sans-serif font, thick axes, and inward tick marks
# sized for publication-quality contour plots.
import matplotlib.pyplot as pl
import matplotlib as mpl
import matplotlib.colors as colors
font = {'family' : 'sans-serif',
        'size' : 17}
mpl.rcParams['xtick.major.size'] = 8
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 8
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rc('font', **font)
# LaTeX packages needed for the colored/boldface math in the axis labels.
mpl.rc('text.latex', preamble=[r'\usepackage{color}', r'\usepackage{amssymb}'])
#-----------------------------------
#----Run Parameters---
# NOTE: this script uses Python 2 print statements throughout.
# Usage: PlotContours.py EXPT MX  (EXPT in {A,B,C,D}, MX = DM mass in GeV).
if (len(sys.argv) != 3):
    print " PlotContours.py requires 2 argument - e.g. PlotContours.py EXPT MX"
    print " where EXPT = A, B, C, D and MX = DM mass in GeV"
    print " Exiting..."
    sys.exit()
expt = str(sys.argv[1])
mx = int(sys.argv[2])
print " Plotting contour-plot for ensemble " + expt + " and DM mass " + str(mx) + " GeV..."
#---Constants-----
# Neutron-to-proton ratios N/Z = (A - Z)/Z for different target nuclei.
# (The original comment said "proton-to-neutron", but the arithmetic below is
# clearly (A - Z)/Z, i.e. neutrons over protons.)
R_Xe = (131.0-54)/54
R_Ar = (40-18.0)/18.0
R_Ge = (73.0-32.0)/32.0
R_Si = 1.0
R_Ca = 1.0
R_O = 1.0
R_W = (184-74.0)/74
#---Functions----
#Read in a list of significances for a given point (pID) in parameter space
#for a given reconstruction (reconID)
#Returns the significances sorted in ascending order. If the results file is
#missing, prints a warning and returns a single zero so callers never crash.
def getSigvals(reconID, pID):
    #Filename for results file
    fname = "../results/" + reconID + "/Results_p" + str(pID) +'.txt'
    #Check if the file exists (and clean up the data if necessary)
    if (os.path.exists(fname)):
        data=np.loadtxt(fname)
        #Drop +inf and NaN entries.
        #NOTE(review): only +inf is filtered; -inf would pass through — confirm
        #that the results files can never contain -inf.
        data = data[data != float('+inf')]
        data = data[~np.isnan(data)]
        #If nothing survives the cleaning, fall back to a single zero
        if (len(data) == 0):
            data = [0]
    else:
        print " Error: File not found - " + fname
        data = np.zeros(1)
    return np.sort(data)
#Calculate significance (median, mean, upper, lower) for a given point and reconstruction
#"Upper"/"Lower" are the 84th/16th percentiles (one-sigma band around the median).
#Any other value of `kind` falls back to the significance 10% of the way into
#the sorted sample.
def getSignificance(reconID, pID, kind="Median"):
    sigvals = getSigvals(reconID, pID)
    summaries = {
        "Mean": np.mean,
        "Median": np.median,
        "Upper": lambda v: np.percentile(v, 84.0),
        "Lower": lambda v: np.percentile(v, 16.0),
    }
    if kind in summaries:
        return summaries[kind](sigvals)
    #Fallback: the value 10% of the way into the sorted significances
    return sigvals[int(np.round(len(sigvals)*0.1))]
#Calculating top axis from bottom axis
def inv_tick_function(X):
    #Map the top-axis coupling value X to the bottom-axis coordinate
    #X / sqrt(1 + X^2); works elementwise on scalars and numpy arrays.
    denom = np.sqrt(1.0 + X**2)
    return X / denom
#----Calculations---
fig, ax1 = pl.subplots(1,1, figsize=(7,6))
levels = np.array([1,2,3,4,5]) #Contours of sigma
colmap = pl.get_cmap("Greens") #Color map
#Results folder name for this ensemble/mass combination
reconID = "AP_Expt" + expt + "_" + str(mx)
#Number of grid points in the parameter space in each direction
#CPP (i.e. CalcParamPoint.py) transforms indices into values of couplings
Np = CPP.Np
xvals = CPP.frange #x-axis
yvals = CPP.Rrange #y-axis
sigvals = np.zeros(Np*Np)
#Get significance for each point
for i in range(Np*Np):
    sigvals[i] = getSignificance(reconID, i+1, kind="Median")
zvals = np.reshape(sigvals, (Np, Np)).T
#Resample and filter the median results along the y-axis
Nvals = 50
xgrid = np.linspace(-1.0, -0.94, Nvals)
ygrid = np.linspace(0.6, 1.00, Nvals)
z_interp = interp2d(xvals, yvals, zvals, kind='linear')
xvals = xgrid*1.0
yvals = ygrid*1.0
zvals = z_interp(xvals, yvals)
#Median-filter each column (window of 5) to smooth sampling noise
for i in range(Nvals):
    zvals[:,i] = ndimage.filters.median_filter(zvals[:,i], 5)
# Do some plotting
#Plot filled contours
cf = ax1.contourf(xvals, yvals, zvals, \
        levels, cmap=colmap, extend='max')
#Plot contour lines
cons0 = ax1.contour(xvals, yvals, zvals, \
        levels, colors='forestgreen')
#Find and plot maximum point
#(uses the unsmoothed sigvals, so the star can sit off the smoothed contours)
maxID = np.argmax(sigvals)
ax1.plot(CPP.getf(maxID+1), CPP.getR(maxID+1),ms=12,marker='*', mew=0.5, color='k')
print " Maximum significance: ", np.max(sigvals), "[INDEX " + str(maxID+1)+"]"
#Add red squares in some cases
if ((expt == "D" or expt == "A") and (mx == 50)):
    ax1.plot(-0.995, 0.75, ms=8,marker='s',color='r', mew=0.5)
    ax1.plot(-0.995, 0.8, ms=8,marker='s',color='r', mew=0.5)
ax1.set_xlim(-1.00, -0.94)
ax1.set_ylim(0.5, 1.1)
ax1.yaxis.set_major_locator(MultipleLocator(0.1))
#Add horizontal dashed lines for different elements
#(each line sits at Z/N = 1/R for the element, where discrimination is hardest)
ax1.axhline(1.0/R_Xe, linestyle="--", color='k')
ax1.text(-0.943, 1.0/R_Xe+0.008, r"Xe",ha="right")
ax1.axhline(1.0/R_Ar, linestyle="--", color='k')
ax1.text(-0.943, 1.0/R_Ar+0.008, r"Ar",ha="right")
if (expt == "A"):
    ax1.axhline(1.0/R_Si, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_Si-0.03, r"Si",ha="right")
if (expt == "B" or expt == "D"):
    ax1.axhline(1.0/R_Ge, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_Ge+0.008, r"Ge",ha="right")
if (expt == "C" or expt == "D"):
    ax1.axhline(1.0/R_Ca, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_Ca-0.03, r"Ca,O", ha="right")
    ax1.axhline(1.0/R_W, linestyle="--", color='k')
    ax1.text(-0.943, 1.0/R_W-0.03, r"W", ha="right")
#Sort out the top axis
ax2 = ax1.twiny()
topticks = np.array([-30,-10, -5,-4,-3])
ax2.set_xticks(inv_tick_function(topticks))
ax2.set_xticklabels(np.abs(topticks))
ax2.set_xlim(ax1.get_xlim())
#Add some labels
ax1.text(-0.942, 1.055, r"Ensemble " + expt + "; $m_\chi = " + str(mx) + "\,\,\mathrm{GeV}$", ha="right", fontsize=18.0)
ax1.text(-0.999, 0.52, r"Max. significance ($\bigstar$): $" + "{0:.1f}".format(np.max(sigvals)) + "\sigma$", fontsize=16.0)
ax1.set_ylabel(r'$\lambda_n/\lambda_p$', fontsize=20)
ax2.set_xlabel(r'$|\lambda_n^D/\lambda_n^{\overline{D}}|$', fontsize=20.0)
ax1.set_xticklabels(["-1", "-0.99", "-0.98", "-0.97", "-0.96","-0.95", "-0.94"])
fig.suptitle( r'$f = (\lambda_p^D \lambda_n^{D} + \lambda_p^{\overline{D}} \lambda_n^{\overline{D}})/ \sqrt{(\lambda_p^{D \,2} + \lambda_p^{\overline{D}\, 2})(\lambda_n^{D \,2} + \lambda_n^{\overline{D}\, 2})}$', \
    ha='center',x=0.5, y=0.05, fontsize=20.0)
#Add colorbar
cbar_ax = fig.add_axes([0.96, 0.15, 0.015, 0.7])
cb0 = fig.colorbar(cf, cax=cbar_ax, ticks=levels, extend='max')
cb0.set_ticklabels([r'$1\sigma$',\
         r'$2\sigma$',r'$3\sigma$',r'$4\sigma$',r'$5\sigma$'])
cb0.ax.tick_params(labelsize=18.0)
#Save to file
pl.savefig("../plots/individual/Contours-" + expt + "_" + str(mx) + ".pdf", bbox_inches="tight")
#pl.show()
|
|
################################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################
"""
A module for the binomial distribution node
"""
import numpy as np
import scipy.special as special
from .expfamily import (ExponentialFamily,
ExponentialFamilyDistribution,
useconstructor)
from .beta import BetaMoments
from .poisson import PoissonMoments
from .node import (Moments,
ensureparents)
from bayespy.utils import misc, random
class BinomialMoments(PoissonMoments):
    """
    Class for the moments of binomial variables
    """

    def __init__(self, N):
        # Number of trials; realizations live in {0, ..., N}.
        self.N = N
        super().__init__()

    def compute_fixed_moments(self, x):
        """
        Compute the moments for a fixed value
        """
        # Make sure the values are integers in valid range
        x = np.asanyarray(x)
        if np.any(x > self.N):
            raise ValueError("Invalid count")
        # BUG FIX: the observed value must be forwarded to the parent class.
        # Previously this called super().compute_fixed_moments() with no
        # arguments, which raised a TypeError for every fixed value.
        return super().compute_fixed_moments(x)

    def compute_dims_from_values(self, x):
        """
        Return the shape of the moments for a fixed value.

        The realizations are scalars, thus the shape of the moment is ().
        """
        # Deprecated entry point; the unreachable super() call that followed
        # the raise has been removed as dead code.
        raise DeprecationWarning()
class BinomialDistribution(ExponentialFamilyDistribution):
    """
    Class for the VMP formulas of binomial variables.
    """
    def __init__(self, N):
        # Validate the number of trials: it must be a (possibly array-valued)
        # non-negative integer.
        N = np.asanyarray(N)
        if not misc.isinteger(N):
            raise ValueError("Number of trials must be integer")
        if np.any(N < 0):
            raise ValueError("Number of trials must be non-negative")
        self.N = np.asanyarray(N)
        super().__init__()
    def compute_message_to_parent(self, parent, index, u_self, u_p):
        """
        Compute the message to a parent node.

        The only parent (index 0) is the success probability. The message is
        the pair [x, N - x]: x*[1, -1] + n*[0, 1] broadcasts to exactly that.
        """
        if index == 0:
            x = u_self[0][...,None]
            n = self.N[...,None]
            m0 = x*[1, -1] + n*[0, 1]
            m = [m0]
            return m
        else:
            raise ValueError("Incorrect parent index")
    def compute_phi_from_parents(self, u_p, mask=True):
        """
        Compute the natural parameter vector given parent moments.

        The parent supplies the two log-moments of p in u_p[0]; the natural
        parameter is their difference (the expected log-odds).
        """
        logp0 = u_p[0][...,0]
        logp1 = u_p[0][...,1]
        phi0 = logp0 - logp1
        return [phi0]
    def compute_moments_and_cgf(self, phi, mask=True):
        """
        Compute the moments and :math:`g(\phi)`.
        """
        # E[x] = N * sigmoid(phi) and g = -N * log(1 + exp(phi)).
        u0 = self.N / (1 + np.exp(-phi[0]))
        g = -self.N * np.log1p(np.exp(phi[0]))
        return ( [u0], g )
    def compute_cgf_from_parents(self, u_p):
        """
        Compute :math:`\mathrm{E}_{q(p)}[g(p)]`
        """
        # Only the second parent log-moment contributes: E[g] = N * E[log(1-p)]
        # (assuming u_p[0][..., 1] is that moment, as in compute_phi_from_parents).
        logp0 = u_p[0][...,0]
        logp1 = u_p[0][...,1]
        return self.N * logp1
    def compute_fixed_moments_and_f(self, x, mask=True):
        """
        Compute the moments and :math:`f(x)` for a fixed value.
        """
        # Make sure the values are integers in valid range
        x = np.asanyarray(x)
        if not misc.isinteger(x):
            raise ValueError("Counts must be integer")
        if np.any(x < 0) or np.any(x > self.N):
            raise ValueError("Invalid count")
        # Now, the moments are just the counts
        u = [x]
        # f(x) = log C(N, x), written with gammaln for numerical stability.
        f = (special.gammaln(self.N+1) -
             special.gammaln(x+1) -
             special.gammaln(self.N-x+1))
        return (u, f)
    def random(self, *phi, plates=None):
        """
        Draw a random sample from the distribution.
        """
        # Natural parameter is the log-odds; convert back to a probability.
        p = random.logodds_to_probability(phi[0])
        return np.random.binomial(self.N, p, size=plates)
    def squeeze(self, axis):
        # N must be constant along the squeezed axis, i.e. that axis of the
        # trials array must be singleton; otherwise squeezing is ill-defined.
        try:
            N_squeezed = np.squeeze(self.N, axis)
        except ValueError as err:
            raise ValueError(
                "The number of trials must be constant over a squeezed axis, "
                "so the corresponding array axis must be singleton. "
                "Cannot squeeze axis {0} from a binomial distribution "
                "because the number of trials arrays has shape {2}, so "
                "the given axis has length {1} != 1. ".format(
                    axis,
                    np.shape(self.N)[axis],
                    np.shape(self.N),
                )
            ) from err
        else:
            return BinomialDistribution(N_squeezed)
class Binomial(ExponentialFamily):
    r"""
    Node for binomial random variables.

    The node models the number of successes :math:`x \in \{0, \ldots, n\}` in
    :math:`n` trials with probability :math:`p` for success:

    .. math::

        x \sim \mathrm{Binomial}(n, p).

    Parameters
    ----------

    n : scalar or array
        Number of trials

    p : beta-like node or scalar or array
        Probability of a success in a trial

    Examples
    --------

    >>> import warnings
    >>> warnings.filterwarnings('ignore', category=RuntimeWarning)
    >>> from bayespy.nodes import Binomial, Beta
    >>> p = Beta([1e-3, 1e-3])
    >>> x = Binomial(10, p)
    >>> x.observe(7)
    >>> p.update()
    >>> import bayespy.plot as bpplt
    >>> import numpy as np
    >>> bpplt.pdf(p, np.linspace(0, 1, num=100))
    [<matplotlib.lines.Line2D object at 0x...>]

    See also
    --------

    Bernoulli, Multinomial, Beta
    """
    def __init__(self, n, p, **kwargs):
        """
        Create binomial node
        """
        super().__init__(n, p, **kwargs)
    @classmethod
    def _constructor(cls, n, p, **kwargs):
        """
        Constructs distribution and moments objects.

        Returns the tuple expected by the ExponentialFamily framework:
        (parents, kwargs, moment dims, plates, distribution, moments,
        parent_moments). The moments are scalar, hence the dims ( (), ).
        """
        # Coerce the probability parent into a beta-moments node.
        p = cls._ensure_moments(p, BetaMoments)
        parents = [p]
        moments = BinomialMoments(n)
        parent_moments = (p._moments,)
        distribution = BinomialDistribution(n)
        return ( parents,
                 kwargs,
                 ( (), ),
                 cls._total_plates(kwargs.get('plates'),
                                   distribution.plates_from_parent(0, p.plates),
                                   np.shape(n)),
                 distribution,
                 moments,
                 parent_moments)
    def __str__(self):
        """
        Print the distribution using standard parameterization.
        """
        # Recover p from the natural parameter (log-odds) via the sigmoid.
        p = 1 / (1 + np.exp(-self.phi[0]))
        n = self._distribution.N
        return ("%s ~ Binomial(n, p)\n"
                "  n = \n"
                "%s\n"
                "  p = \n"
                "%s\n"
                % (self.name, n, p))
|
|
from django.conf import settings
from django.test import TestCase
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User, Permission
from grid.models import Grid, Element, Feature, GridPackage
from package.models import Package
from grid.tests import data
class FunctionalGridTest(TestCase):
def setUp(self):
data.load()
settings.RESTRICT_GRID_EDITORS = False
def test_grid_list_view(self):
url = reverse('grids')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/grids.html')
def test_grid_detail_view(self):
url = reverse('grid', kwargs={'slug': 'testing'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/grid_detail.html')
def test_grid_detail_feature_view(self):
url = reverse('grid_detail_feature',
kwargs={'slug': 'testing',
'feature_id': '1',
'bogus_slug': '508-compliant'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/grid_detail_feature.html')
def test_grid_detail_feature_view_contents(self):
url = reverse('grid_detail_feature',
kwargs={'slug': 'testing',
'feature_id': '1',
'bogus_slug': '508-compliant'})
response = self.client.get(url)
self.assertContains(response, '<a href="/">home</a>')
self.assertContains(response, '<a href="/grids/">grids</a>')
self.assertContains(response, '<a href="/grids/g/testing/">Testing</a>')
self.assertContains(response, 'Has tests?')
self.assertContains(response,
'<a href="/packages/p/testability/">Testability')
self.assertContains(response,
'<a href="/packages/p/supertester/">Supertester')
self.assertContains(response,
'<td class="clickable" id="element-f1-p1"><img')
self.assertNotContains(response,
'<td class="clickable" id="element-f1-p2"><img')
def test_add_grid_view(self):
url = reverse('add_grid')
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/add_grid.html')
# Test form post
count = Grid.objects.count()
response = self.client.post(url, {
'title': 'TEST TITLE',
'slug': 'test-title',
'description': 'Just a test description'
}, follow=True)
self.assertEqual(Grid.objects.count(), count + 1)
self.assertContains(response, 'TEST TITLE')
def test_edit_grid_view(self):
url = reverse('edit_grid', kwargs={'slug': 'testing'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/edit_grid.html')
# Test form post
count = Grid.objects.count()
response = self.client.post(url, {
'title': 'TEST TITLE',
'slug': 'testing',
'description': 'Just a test description'
}, follow=True)
self.assertEqual(Grid.objects.count(), count)
self.assertContains(response, 'TEST TITLE')
def test_add_feature_view(self):
url = reverse('add_feature', kwargs={'grid_slug': 'testing'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/add_feature.html')
# Test form post
count = Feature.objects.count()
response = self.client.post(url, {
'title': 'TEST TITLE',
'description': 'Just a test description'
}, follow=True)
self.assertEqual(Feature.objects.count(), count + 1)
self.assertContains(response, 'TEST TITLE')
def test_edit_feature_view(self):
url = reverse('edit_feature', kwargs={'id': '1'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/edit_feature.html')
# Test form post
count = Feature.objects.count()
response = self.client.post(url, {
'title': 'TEST TITLE',
'description': 'Just a test description'
}, follow=True)
self.assertEqual(Feature.objects.count(), count)
self.assertContains(response, 'TEST TITLE')
def test_delete_feature_view(self):
count = Feature.objects.count()
# Since this user doesn't have the appropriate permissions, none of the
# features should be deleted (thus the count should be the same).
self.assertTrue(self.client.login(username='user', password='user'))
url = reverse('delete_feature', kwargs={'id': '1'})
self.client.get(url)
self.assertEqual(count, Feature.objects.count())
# Once we log in with the appropriate user, the request should delete
# the given feature, reducing the count by one.
self.assertTrue(self.client.login(username='cleaner', password='cleaner'))
self.client.get(url)
self.assertEqual(Feature.objects.count(), count - 1)
def test_edit_element_view(self):
url = reverse('edit_element', kwargs={'feature_id': '1', 'package_id': '1'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/edit_element.html')
# Test form post
count = Element.objects.count()
response = self.client.post(url, {
'text': 'Some random text',
}, follow=True)
self.assertEqual(Element.objects.count(), count)
self.assertContains(response, 'Some random text')
# Confirm 404 if grid IDs differ
url = reverse('edit_element', kwargs={'feature_id': '1', 'package_id': '4'})
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_add_grid_package_view(self):
url = reverse('add_grid_package', kwargs={'grid_slug': 'testing'})
response = self.client.get(url)
# The response should be a redirect, since the user is not logged in.
self.assertEqual(response.status_code, 302)
# Once we log in the user, we should get back the appropriate response.
self.assertTrue(self.client.login(username='user', password='user'))
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'grid/add_grid_package.html')
# Test form post for existing grid package
response = self.client.post(url, {
'package': 2,
})
self.assertContains(response,
''Supertester' is already in this grid.')
# Test form post for new grid package
count = GridPackage.objects.count()
response = self.client.post(url, {
'package': 4,
}, follow=True)
self.assertEqual(GridPackage.objects.count(), count + 1)
self.assertContains(response, 'Another Test')
def test_add_new_grid_package_view(self):
    """Exercise add_new_grid_package: anonymous redirect, authenticated
    GET of the package form, and creation of a brand new package."""
    add_url = reverse('add_new_grid_package', kwargs={'grid_slug': 'testing'})
    # Anonymous access is redirected to the login page.
    self.assertEqual(self.client.get(add_url).status_code, 302)
    # An authenticated user gets the package creation form.
    self.assertTrue(self.client.login(username='user', password='user'))
    response = self.client.get(add_url)
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'package/package_form.html')
    # Submitting the form creates exactly one new package.
    package_count = Package.objects.count()
    response = self.client.post(add_url, {
        'repo_url': 'http://www.example.com',
        'title': 'Test package',
        'slug': 'test-package',
        'pypi_url': 'http://pypi.python.org/pypi/mogo/0.1.1',
        'category': 1,
    }, follow=True)
    self.assertEqual(Package.objects.count(), package_count + 1)
    self.assertContains(response, 'Test package')
def test_ajax_grid_list_view(self):
    """The ajax grid list endpoint returns grids matching the q filter."""
    query_url = '{0}?q=Testing&package_id=4'.format(reverse('ajax_grid_list'))
    self.assertContains(self.client.get(query_url), 'Testing')
def test_delete_gridpackage_view(self):
    """Only a user holding the delete permission may remove a grid
    package; others leave the count untouched."""
    initial_count = GridPackage.objects.count()
    delete_url = reverse('delete_grid_package', kwargs={'id': '1'})
    # A regular user lacks the permission, so nothing is deleted.
    self.assertTrue(self.client.login(username='user', password='user'))
    self.client.get(delete_url)
    self.assertEqual(GridPackage.objects.count(), initial_count)
    # The privileged user deletes the package, dropping the count by one.
    self.assertTrue(self.client.login(username='cleaner', password='cleaner'))
    self.client.get(delete_url)
    self.assertEqual(GridPackage.objects.count(), initial_count - 1)
def test_latest_grids_view(self):
    """The latest-grids archive page renders with its template."""
    response = self.client.get(reverse('latest_grids'))
    self.assertEqual(response.status_code, 200)
    self.assertTemplateUsed(response, 'grid/grid_archive.html')
class RegressionGridTest(TestCase):
    """Regression tests for the grid app views."""

    def setUp(self):
        data.load()
        settings.RESTRICT_GRID_EDITORS = False

    def test_edit_element_view_for_nonexistent_elements(self):
        """Make sure that attempts to edit nonexistent elements succeed.
        """
        # Delete the element for the specified feature and package.
        # BUG FIX: use the *_id lookups — assigning a raw pk (feature=1)
        # to a ForeignKey field raises ValueError on the create path of
        # get_or_create, because the FK descriptor requires a model
        # instance; `feature_id`/`grid_package_id` accept the pk directly.
        element, created = Element.objects.get_or_create(
            feature_id=1, grid_package_id=1)
        element.delete()
        # Log in the test user and attempt to edit the element.
        self.assertTrue(self.client.login(username='user', password='user'))
        url = reverse('edit_element', kwargs={'feature_id': '1', 'package_id': '1'})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'grid/edit_element.html')
class GridPermissionTest(TestCase):
    """Verify the grid add/edit views enforce model permissions when
    RESTRICT_GRID_EDITORS is enabled."""

    def setUp(self):
        data.load()
        settings.RESTRICT_GRID_EDITORS = True
        self.test_add_url = reverse('add_grid')
        self.test_edit_url = reverse('edit_grid', kwargs={'slug': 'testing'})
        self.login = self.client.login(username='user', password='user')
        self.user = User.objects.get(username='user')

    def _grant(self, codename):
        # Attach a grid-app permission to the logged-in test user.
        permission = Permission.objects.get(
            codename=codename, content_type__app_label='grid')
        self.user.user_permissions.add(permission)

    def test_add_grid_permission_fail(self):
        self.assertEqual(self.client.get(self.test_add_url).status_code, 403)

    def test_add_grid_permission_success(self):
        self._grant('add_grid')
        self.assertEqual(self.client.get(self.test_add_url).status_code, 200)

    def test_edit_grid_permission_fail(self):
        self.assertEqual(self.client.get(self.test_edit_url).status_code, 403)

    def test_edit_grid_permission_success(self):
        self._grant('change_grid')
        self.assertEqual(self.client.get(self.test_edit_url).status_code, 200)
class GridPackagePermissionTest(TestCase):
    """Verify permission enforcement on the grid-package add and delete
    views when RESTRICT_GRID_EDITORS is enabled."""

    def setUp(self):
        data.load()
        settings.RESTRICT_GRID_EDITORS = True
        self.test_add_url = reverse('add_grid_package',
                                    kwargs={'grid_slug': 'testing'})
        self.test_add_new_url = reverse('add_new_grid_package',
                                        kwargs={'grid_slug': 'testing'})
        self.test_delete_url = reverse('delete_grid_package',
                                       kwargs={'id': '1'})
        self.login = self.client.login(username='user', password='user')
        self.user = User.objects.get(username='user')

    def _grant(self, codename):
        # Attach a grid-app permission to the logged-in test user.
        permission = Permission.objects.get(
            codename=codename, content_type__app_label='grid')
        self.user.user_permissions.add(permission)

    def test_login(self):
        self.assertTrue(self.login)

    def test_add_grid_package_permission_fail(self):
        self.assertEqual(self.client.get(self.test_add_url).status_code, 403)

    def test_add_grid_package_permission_success(self):
        self._grant('add_gridpackage')
        self.assertEqual(self.client.get(self.test_add_url).status_code, 200)

    def test_add_new_grid_package_permission_fail(self):
        self.assertEqual(
            self.client.get(self.test_add_new_url).status_code, 403)

    def test_add_new_grid_package_permission_success(self):
        self._grant('add_gridpackage')
        self.assertEqual(
            self.client.get(self.test_add_new_url).status_code, 200)

    def test_delete_grid_package_permission_fail(self):
        # The delete view redirects rather than returning 403.
        self.assertEqual(
            self.client.get(self.test_delete_url).status_code, 302)

    def test_delete_grid_package_permission_success(self):
        self._grant('delete_gridpackage')
        self.assertEqual(
            self.client.get(self.test_delete_url).status_code, 302)
class GridFeaturePermissionTest(TestCase):
    """Verify permission enforcement on the feature add/edit/delete views
    when RESTRICT_GRID_EDITORS is enabled."""

    def setUp(self):
        data.load()
        settings.RESTRICT_GRID_EDITORS = True
        self.test_add_url = reverse('add_feature',
                                    kwargs={'grid_slug': 'testing'})
        self.test_edit_url = reverse('edit_feature', kwargs={'id': '1'})
        self.test_delete_url = reverse('delete_feature', kwargs={'id': '1'})
        self.login = self.client.login(username='user', password='user')
        self.user = User.objects.get(username='user')

    def _grant(self, codename):
        # Attach a grid-app permission to the logged-in test user.
        permission = Permission.objects.get(
            codename=codename, content_type__app_label='grid')
        self.user.user_permissions.add(permission)

    def test_add_feature_permission_fail(self):
        self.assertEqual(self.client.get(self.test_add_url).status_code, 403)

    def test_add_feature_permission_success(self):
        self._grant('add_feature')
        self.assertEqual(self.client.get(self.test_add_url).status_code, 200)

    def test_edit_feature_permission_fail(self):
        self.assertEqual(self.client.get(self.test_edit_url).status_code, 403)

    def test_edit_feature_permission_success(self):
        self._grant('change_feature')
        self.assertEqual(self.client.get(self.test_edit_url).status_code, 200)

    def test_delete_feature_permission_fail(self):
        # The delete view redirects rather than returning 403.
        self.assertEqual(
            self.client.get(self.test_delete_url).status_code, 302)

    def test_delete_feature_permission_success(self):
        self._grant('delete_feature')
        self.assertEqual(
            self.client.get(self.test_delete_url).status_code, 302)
class GridElementPermissionTest(TestCase):
    """Verify permission enforcement on the element edit view when
    RESTRICT_GRID_EDITORS is enabled."""

    def setUp(self):
        data.load()
        settings.RESTRICT_GRID_EDITORS = True
        self.test_edit_url = reverse('edit_element',
                                     kwargs={'feature_id': '1',
                                             'package_id': '1'})
        self.login = self.client.login(username='user', password='user')
        self.user = User.objects.get(username='user')

    def test_edit_element_permission_fail(self):
        self.assertEqual(self.client.get(self.test_edit_url).status_code, 403)

    def test_edit_element_permission_success(self):
        permission = Permission.objects.get(
            codename='change_element', content_type__app_label='grid')
        self.user.user_permissions.add(permission)
        self.assertEqual(self.client.get(self.test_edit_url).status_code, 200)
|
|
# Copyright 2013 Nicira, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from neutronclient.common import exceptions as n_exc
from neutronclient.neutron import v2_0 as neutronv20
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import uuidutils
import six
from webob import exc
from nova import exception
from nova.i18n import _
from nova.network.neutronv2 import api as neutronapi
from nova.network.security_group import security_group_base
from nova import utils
# Module-level logger for this driver.
LOG = logging.getLogger(__name__)
# NOTE: Neutron client has a max URL length of 8192, so we have
# to limit the number of IDs we include in any single search. Really
# doesn't seem to be any point in making this a config value.
MAX_SEARCH_IDS = 150
class SecurityGroupAPI(security_group_base.SecurityGroupBase):
    """Nova security group API implemented on top of Neutron.

    Translates between the Nova security group representation and the
    Neutron security group / security group rule resources.
    """

    # Neutron identifies security groups by UUID.
    id_is_uuid = True

    def create_security_group(self, context, name, description):
        """Create a security group in Neutron and return it in Nova format.

        :param context: the request context
        :param name: security group name
        :param description: security group description
        :returns: the new group converted to Nova dict format
        """
        neutron = neutronapi.get_client(context)
        body = self._make_neutron_security_group_dict(name, description)
        try:
            security_group = neutron.create_security_group(
                body).get('security_group')
        except n_exc.BadRequest as e:
            raise exception.Invalid(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            LOG.exception("Neutron Error creating security group %s", name)
            if e.status_code == 401:
                # TODO(arosen) Cannot raise generic response from neutron here
                # as this error code could be related to bad input or over
                # quota
                raise exc.HTTPBadRequest()
            elif e.status_code == 409:
                self.raise_over_quota(six.text_type(e))
            six.reraise(*exc_info)
        return self._convert_to_nova_security_group_format(security_group)

    def update_security_group(self, context, security_group,
                              name, description):
        """Update a security group's name and description via Neutron."""
        neutron = neutronapi.get_client(context)
        body = self._make_neutron_security_group_dict(name, description)
        try:
            security_group = neutron.update_security_group(
                security_group['id'], body).get('security_group')
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            LOG.exception("Neutron Error updating security group %s", name)
            if e.status_code == 401:
                # TODO(arosen) Cannot raise generic response from neutron here
                # as this error code could be related to bad input or over
                # quota
                raise exc.HTTPBadRequest()
            six.reraise(*exc_info)
        return self._convert_to_nova_security_group_format(security_group)

    def validate_property(self, value, property, allowed):
        """Validate given security group property.

        :param value: the value to validate, as a string or unicode
        :param property: the property, either 'name' or 'description'
        :param allowed: the range of characters allowed, but not used because
                        Neutron is allowing any characters.
        """
        # NOTE: If using nova-network as the backend, min_length is 1. However
        # if using Neutron, Nova has allowed empty string as its history.
        # So this min_length should be 0 for passing the existing requests.
        utils.check_string_length(value, name=property, min_length=0,
                                  max_length=255)

    def _convert_to_nova_security_group_format(self, security_group):
        """Convert a Neutron security group dict to Nova's format.

        Only ingress rules are exposed, matching the Nova API contract.
        """
        nova_group = {}
        nova_group['id'] = security_group['id']
        nova_group['description'] = security_group['description']
        nova_group['name'] = security_group['name']
        nova_group['project_id'] = security_group['tenant_id']
        nova_group['rules'] = []
        for rule in security_group.get('security_group_rules', []):
            if rule['direction'] == 'ingress':
                nova_group['rules'].append(
                    self._convert_to_nova_security_group_rule_format(rule))
        return nova_group

    def _convert_to_nova_security_group_rule_format(self, rule):
        """Convert a Neutron security group rule dict to Nova's format."""
        nova_rule = {}
        nova_rule['id'] = rule['id']
        nova_rule['parent_group_id'] = rule['security_group_id']
        nova_rule['protocol'] = rule['protocol']
        if (nova_rule['protocol'] and rule.get('port_range_min') is None and
                rule.get('port_range_max') is None):
            # Neutron leaves the port range unset to mean "all ports";
            # Nova represents that explicitly.
            if rule['protocol'].upper() in ['TCP', 'UDP']:
                nova_rule['from_port'] = 1
                nova_rule['to_port'] = 65535
            else:
                nova_rule['from_port'] = -1
                nova_rule['to_port'] = -1
        else:
            nova_rule['from_port'] = rule.get('port_range_min')
            nova_rule['to_port'] = rule.get('port_range_max')
        nova_rule['group_id'] = rule['remote_group_id']
        nova_rule['cidr'] = self.parse_cidr(rule.get('remote_ip_prefix'))
        return nova_rule

    def get(self, context, name=None, id=None, map_exception=False):
        """Return a single security group, looked up by id or by name."""
        neutron = neutronapi.get_client(context)
        try:
            if not id and name:
                # NOTE(flwang): The project id should be honoured so as to get
                # the correct security group id when user(with admin role but
                # non-admin project) try to query by name, so as to avoid
                # getting more than duplicated records with the same name.
                id = neutronv20.find_resourceid_by_name_or_id(
                    neutron, 'security_group', name, context.project_id)
            group = neutron.show_security_group(id).get('security_group')
            return self._convert_to_nova_security_group_format(group)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                LOG.debug("Neutron security group %s not found", name)
                raise exception.SecurityGroupNotFound(six.text_type(e))
            else:
                LOG.error("Neutron Error: %s", e)
                six.reraise(*exc_info)
        except TypeError as e:
            LOG.error("Neutron Error: %s", e)
            msg = _("Invalid security group name: %(name)s.") % {"name": name}
            raise exception.SecurityGroupNotFound(six.text_type(msg))

    def list(self, context, names=None, ids=None, project=None,
             search_opts=None):
        """Returns list of security group rules owned by tenant."""
        neutron = neutronapi.get_client(context)
        params = {}
        search_opts = search_opts if search_opts else {}
        if names:
            params['name'] = names
        if ids:
            params['id'] = ids
        # NOTE(jeffrey4l): list all the security groups when following
        # conditions are met
        #  * names and ids don't exist.
        #  * it is admin context and all_tenants exist in search_opts.
        #  * project is not specified.
        list_all_tenants = (context.is_admin
                            and 'all_tenants' in search_opts
                            and not any([names, ids]))
        # NOTE(jeffrey4l): The neutron doesn't have `all-tenants` concept.
        # All the security group will be returned if the project/tenant
        # id is not passed.
        if project and not list_all_tenants:
            params['tenant_id'] = project
        try:
            security_groups = neutron.list_security_groups(**params).get(
                'security_groups')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Neutron Error getting security groups")
        converted_rules = []
        for security_group in security_groups:
            converted_rules.append(
                self._convert_to_nova_security_group_format(security_group))
        return converted_rules

    def validate_id(self, id):
        """Validate that the given id is a UUID; raise otherwise."""
        if not uuidutils.is_uuid_like(id):
            msg = _("Security group id should be uuid")
            self.raise_invalid_property(msg)
        return id

    def destroy(self, context, security_group):
        """This function deletes a security group."""
        neutron = neutronapi.get_client(context)
        try:
            neutron.delete_security_group(security_group['id'])
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                self.raise_not_found(six.text_type(e))
            elif e.status_code == 409:
                self.raise_invalid_property(six.text_type(e))
            else:
                LOG.error("Neutron Error: %s", e)
                six.reraise(*exc_info)

    def add_rules(self, context, id, name, vals):
        """Add security group rule(s) to security group.

        Note: the Nova security group API doesn't support adding multiple
        security group rules at once but the EC2 one does. Therefore,
        this function is written to support both. Multiple rules are
        installed to a security group in neutron using bulk support.
        """
        neutron = neutronapi.get_client(context)
        body = self._make_neutron_security_group_rules_list(vals)
        try:
            rules = neutron.create_security_group_rule(
                body).get('security_group_rules')
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                LOG.exception("Neutron Error getting security group %s", name)
                self.raise_not_found(six.text_type(e))
            elif e.status_code == 409:
                LOG.exception("Neutron Error adding rules to security "
                              "group %s", name)
                self.raise_over_quota(six.text_type(e))
            elif e.status_code == 400:
                LOG.exception("Neutron Error: %s", e)
                self.raise_invalid_property(six.text_type(e))
            else:
                LOG.exception("Neutron Error:")
                six.reraise(*exc_info)
        converted_rules = []
        for rule in rules:
            converted_rules.append(
                self._convert_to_nova_security_group_rule_format(rule))
        return converted_rules

    def _make_neutron_security_group_dict(self, name, description):
        """Build the Neutron request body for a security group."""
        return {'security_group': {'name': name,
                                   'description': description}}

    def _make_neutron_security_group_rules_list(self, rules):
        """Translate Nova rule dicts into a Neutron bulk-create body."""
        new_rules = []
        for rule in rules:
            new_rule = {}
            # nova only supports ingress rules so all rules are ingress.
            new_rule['direction'] = "ingress"
            new_rule['protocol'] = rule.get('protocol')
            # FIXME(arosen) Nova does not expose ethertype on security group
            # rules. Therefore, in the case of self referential rules we
            # should probably assume they want to allow both IPv4 and IPv6.
            # Unfortunately, this would require adding two rules in neutron.
            # The reason we do not do this is because when the user using the
            # nova api wants to remove the rule we'd have to have some way to
            # know that we should delete both of these rules in neutron.
            # For now, self referential rules only support IPv4.
            if not rule.get('cidr'):
                new_rule['ethertype'] = 'IPv4'
            else:
                new_rule['ethertype'] = utils.get_ip_version(rule.get('cidr'))
            new_rule['remote_ip_prefix'] = rule.get('cidr')
            new_rule['security_group_id'] = rule.get('parent_group_id')
            new_rule['remote_group_id'] = rule.get('group_id')
            # -1 is Nova's "unset" marker for port bounds; omit it so
            # Neutron applies its own defaults.
            if 'from_port' in rule and rule['from_port'] != -1:
                new_rule['port_range_min'] = rule['from_port']
            if 'to_port' in rule and rule['to_port'] != -1:
                new_rule['port_range_max'] = rule['to_port']
            new_rules.append(new_rule)
        return {'security_group_rules': new_rules}

    def remove_rules(self, context, security_group, rule_ids):
        """Delete the given security group rules, one at a time."""
        neutron = neutronapi.get_client(context)
        rule_ids = set(rule_ids)
        try:
            # The ec2 api allows one to delete multiple security group rules
            # at once. Since there is no bulk delete for neutron the best
            # thing we can do is delete the rules one by one and hope this
            # works.... :/
            for rule_id in range(0, len(rule_ids)):
                neutron.delete_security_group_rule(rule_ids.pop())
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                # pop() above mutates the set, so rule_ids now holds only
                # the rules that were not deleted.
                LOG.exception("Neutron Error unable to delete %s", rule_ids)

    def get_rule(self, context, id):
        """Return a single security group rule in Nova format."""
        neutron = neutronapi.get_client(context)
        try:
            rule = neutron.show_security_group_rule(
                id).get('security_group_rule')
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                LOG.debug("Neutron security group rule %s not found", id)
                self.raise_not_found(six.text_type(e))
            else:
                LOG.error("Neutron Error: %s", e)
                six.reraise(*exc_info)
        return self._convert_to_nova_security_group_rule_format(rule)

    def _get_ports_from_server_list(self, servers, neutron):
        """Returns a list of ports used by the servers."""
        def _chunk_by_ids(servers, limit):
            ids = []
            for server in servers:
                ids.append(server['id'])
                if len(ids) >= limit:
                    yield ids
                    ids = []
            if ids:
                yield ids

        # Note: Have to split the query up as the search criteria
        # form part of the URL, which has a fixed max size
        ports = []
        for ids in _chunk_by_ids(servers, MAX_SEARCH_IDS):
            search_opts = {'device_id': ids}
            ports.extend(neutron.list_ports(**search_opts).get('ports'))
        return ports

    def _get_secgroups_from_port_list(self, ports, neutron):
        """Returns a dict of security groups keyed by their ids."""
        def _chunk_by_ids(sg_ids, limit):
            sg_id_list = []
            for sg_id in sg_ids:
                sg_id_list.append(sg_id)
                if len(sg_id_list) >= limit:
                    yield sg_id_list
                    sg_id_list = []
            if sg_id_list:
                yield sg_id_list

        # Find the set of unique SecGroup IDs to search for
        sg_ids = set()
        for port in ports:
            sg_ids.update(port.get('security_groups', []))

        # Note: Have to split the query up as the search criteria
        # form part of the URL, which has a fixed max size
        security_groups = {}
        for sg_id_list in _chunk_by_ids(sg_ids, MAX_SEARCH_IDS):
            sg_search_opts = {'id': sg_id_list}
            search_results = neutron.list_security_groups(**sg_search_opts)
            for sg in search_results.get('security_groups'):
                security_groups[sg['id']] = sg
        return security_groups

    def get_instances_security_groups_bindings(self, context, servers,
                                               detailed=False):
        """Returns a dict(instance_id, [security_groups]) to allow obtaining
        all of the instances and their security groups in one shot.
        """
        neutron = neutronapi.get_client(context)
        ports = self._get_ports_from_server_list(servers, neutron)
        security_groups = self._get_secgroups_from_port_list(ports, neutron)
        instances_security_group_bindings = {}
        for port in ports:
            for port_sg_id in port.get('security_groups', []):
                # Note: have to check we found port_sg as its possible
                # the port has an SG that this user doesn't have access to
                port_sg = security_groups.get(port_sg_id)
                if port_sg:
                    if detailed:
                        sg_entry = self._convert_to_nova_security_group_format(
                            port_sg)
                        instances_security_group_bindings.setdefault(
                            port['device_id'], []).append(sg_entry)
                    else:
                        # name is optional in neutron so if not specified
                        # return id
                        name = port_sg.get('name')
                        if not name:
                            name = port_sg.get('id')
                        sg_entry = {'name': name}
                        instances_security_group_bindings.setdefault(
                            port['device_id'], []).append(sg_entry)
        return instances_security_group_bindings

    def get_instance_security_groups(self, context, instance, detailed=False):
        """Returns the security groups that are associated with an instance.

        If detailed is True then it also returns the full details of the
        security groups associated with an instance.
        """
        servers = [{'id': instance.uuid}]
        sg_bindings = self.get_instances_security_groups_bindings(
            context, servers, detailed)
        return sg_bindings.get(instance.uuid, [])

    def _has_security_group_requirements(self, port):
        """Return True if a security group may be applied to the port."""
        port_security_enabled = port.get('port_security_enabled', True)
        has_ip = port.get('fixed_ips')
        deferred_ip = port.get('ip_allocation') == 'deferred'
        if has_ip or deferred_ip:
            return port_security_enabled
        return False

    def add_to_instance(self, context, instance, security_group_name):
        """Add security group to the instance."""
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientNoUniqueMatch as e:
            raise exception.NoUniqueMatch(six.text_type(e))
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception("Neutron Error:")
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Neutron Error:")
        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)
        for port in ports:
            if not self._has_security_group_requirements(port):
                LOG.warning("Cannot add security group %(name)s to "
                            "%(instance)s since the port %(port_id)s "
                            "does not meet security requirements",
                            {'name': security_group_name,
                             'instance': instance.uuid,
                             'port_id': port['id']})
                raise exception.SecurityGroupCannotBeApplied()
            if 'security_groups' not in port:
                port['security_groups'] = []
            port['security_groups'].append(security_group_id)
            updated_port = {'security_groups': port['security_groups']}
            try:
                LOG.info("Adding security group %(security_group_id)s to "
                         "port %(port_id)s",
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Neutron Error:")

    def remove_from_instance(self, context, instance, security_group_name):
        """Remove the security group associated with the instance."""
        neutron = neutronapi.get_client(context)
        try:
            security_group_id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group',
                security_group_name,
                context.project_id)
        except n_exc.NeutronClientException as e:
            exc_info = sys.exc_info()
            if e.status_code == 404:
                msg = (_("Security group %(name)s is not found for "
                         "project %(project)s") %
                       {'name': security_group_name,
                        'project': context.project_id})
                self.raise_not_found(msg)
            else:
                LOG.exception("Neutron Error:")
                six.reraise(*exc_info)
        params = {'device_id': instance.uuid}
        try:
            ports = neutron.list_ports(**params).get('ports')
        except n_exc.NeutronClientException:
            with excutils.save_and_reraise_exception():
                LOG.exception("Neutron Error:")
        if not ports:
            msg = (_("instance_id %s could not be found as device id on"
                     " any ports") % instance.uuid)
            self.raise_not_found(msg)
        found_security_group = False
        for port in ports:
            try:
                port.get('security_groups', []).remove(security_group_id)
            except ValueError:
                # When removing a security group from an instance the security
                # group should be on both ports since it was added this way if
                # done through the nova api. In case it is not a 404 is only
                # raised if the security group is not found on any of the
                # ports on the instance.
                continue
            updated_port = {'security_groups': port['security_groups']}
            try:
                # BUG FIX: this log message previously said "Adding security
                # group ... to port", copy-pasted from add_to_instance; this
                # path removes the group from the port.
                LOG.info("Removing security group %(security_group_id)s "
                         "from port %(port_id)s",
                         {'security_group_id': security_group_id,
                          'port_id': port['id']})
                neutron.update_port(port['id'], {'port': updated_port})
                found_security_group = True
            except Exception:
                with excutils.save_and_reraise_exception():
                    LOG.exception("Neutron Error:")
        if not found_security_group:
            msg = (_("Security group %(security_group_name)s not associated "
                     "with the instance %(instance)s") %
                   {'security_group_name': security_group_name,
                    'instance': instance.uuid})
            self.raise_not_found(msg)

    def get_default_rule(self, context, id):
        """Default rules are a nova-network concept; unsupported here."""
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def get_all_default_rules(self, context):
        """Default rules are a nova-network concept; unsupported here."""
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def add_default_rules(self, context, vals):
        """Default rules are a nova-network concept; unsupported here."""
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def remove_default_rules(self, context, rule_ids):
        """Default rules are a nova-network concept; unsupported here."""
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)

    def default_rule_exists(self, context, values):
        """Default rules are a nova-network concept; unsupported here."""
        msg = _("Network driver does not support this function.")
        raise exc.HTTPNotImplemented(explanation=msg)
|
|
# -*- coding: utf-8 -*-
"""SQLite parser plugin for Google Chrome history database files."""
from dfdatetime import posix_time as dfdatetime_posix_time
from dfdatetime import webkit_time as dfdatetime_webkit_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class ChromeHistoryFileDownloadedEventData(events.EventData):
  """Event data for a file download recorded in the Chrome History database.

  Attributes:
    full_path (str): full path where the file was downloaded to.
    received_bytes (int): number of bytes received while downloading.
    total_bytes (int): total number of bytes to download.
    url (str): URL of the downloaded file.
  """

  DATA_TYPE = 'chrome:history:file_downloaded'

  def __init__(self):
    """Initializes the event data with empty attribute values."""
    super(ChromeHistoryFileDownloadedEventData, self).__init__(
        data_type=self.DATA_TYPE)
    # All attributes are populated later by the parser plugin.
    self.url = None
    self.total_bytes = None
    self.received_bytes = None
    self.full_path = None
class ChromeHistoryPageVisitedEventData(events.EventData):
  """Event data for a page visit recorded in the Chrome History database.

  Attributes:
    from_visit (str): URL where the visit originated from.
    page_transition_type (int): type of transitions between pages.
    title (str): title of the visited page.
    typed_count (int): number of characters of the URL that were typed.
    url (str): URL of the visited page.
    url_hidden (bool): True if the URL is hidden.
    visit_source (int): source of the page visit.
  """

  DATA_TYPE = 'chrome:history:page_visited'

  def __init__(self):
    """Initializes the event data with empty attribute values."""
    super(ChromeHistoryPageVisitedEventData, self).__init__(
        data_type=self.DATA_TYPE)
    # All attributes are populated later by the parser plugin.
    self.visit_source = None
    self.url_hidden = None
    self.url = None
    self.typed_count = None
    self.title = None
    self.page_transition_type = None
    self.from_visit = None
class BaseGoogleChromeHistoryPlugin(interface.SQLitePlugin):
  """SQLite parser plugin for Google Chrome history database files.

  The Google Chrome history database file is typically stored in:
  Archived History
  History

  Note that the Archived History database does not contain the downloads
  table.
  """

  _SYNC_CACHE_QUERY = 'SELECT id, source FROM visit_source'

  _URL_CACHE_QUERY = (
      'SELECT visits.id AS id, urls.url, urls.title FROM '
      'visits, urls WHERE urls.id = visits.url')

  # https://cs.chromium.org/chromium/src/ui/base/page_transition_types.h?l=108
  _PAGE_TRANSITION_CORE_MASK = 0xff

  def _GetUrl(self, url, cache, database):
    """Retrieves an URL from a reference to an entry in the from_visit table.

    Args:
      url (str): URL.
      cache (SQLiteCache): cache.
      database (SQLiteDatabase): database.

    Returns:
      str: URL or an empty string if no URL was found.
    """
    if not url:
      return ''

    cached_urls = cache.GetResults('url')
    if not cached_urls:
      # Populate the cache from the urls table on first use.
      cache.CacheQueryResults(
          database.Query(self._URL_CACHE_QUERY), 'url', 'id',
          ('url', 'title'))
      cached_urls = cache.GetResults('url')

    reference_url, reference_title = cached_urls.get(url, ['', ''])
    if not reference_url:
      return ''
    return '{0:s} ({1:s})'.format(reference_url, reference_title)

  def _GetVisitSource(self, visit_identifier, cache, database):
    """Retrieves a visit source type based on the identifier.

    Args:
      visit_identifier (str): identifier from the visits table for the
          particular record.
      cache (SQLiteCache): cache which contains cached results from querying
          the visit_source table.
      database (SQLiteDatabase): database.

    Returns:
      int: visit source type or None if no visit source type was found for
          the identifier.
    """
    cached_sources = cache.GetResults('sync')
    if not cached_sources:
      # Populate the cache from the visit_source table on first use.
      cache.CacheQueryResults(
          database.Query(self._SYNC_CACHE_QUERY), 'sync', 'id', ('source',))
      cached_sources = cache.GetResults('sync')

    if not cached_sources or not visit_identifier:
      return None

    results = cached_sources.get(visit_identifier, None)
    if results:
      return results[0]
    return None

  def ParseLastVisitedRow(
      self, parser_mediator, query, row, cache=None, database=None,
      **unused_kwargs):
    """Parses a last visited row.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
      cache (SQLiteCache): cache which contains cached results from querying
          the visits and urls tables.
      database (Optional[SQLiteDatabase]): database.
    """
    query_hash = hash(query)

    event_data = ChromeHistoryPageVisitedEventData()
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.title = self._GetRowValue(query_hash, row, 'title')
    event_data.typed_count = self._GetRowValue(query_hash, row, 'typed_count')
    event_data.url = self._GetRowValue(query_hash, row, 'url')

    from_visit = self._GetRowValue(query_hash, row, 'from_visit')
    event_data.from_visit = self._GetUrl(from_visit, cache, database)

    hidden = self._GetRowValue(query_hash, row, 'hidden')
    event_data.url_hidden = hidden == '1'

    transition = self._GetRowValue(query_hash, row, 'transition')
    event_data.page_transition_type = (
        transition & self._PAGE_TRANSITION_CORE_MASK)

    visit_identifier = self._GetRowValue(query_hash, row, 'visit_id')
    event_data.visit_source = self._GetVisitSource(
        visit_identifier, cache, database)

    timestamp = self._GetRowValue(query_hash, row, 'visit_time')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_LAST_VISITED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
class GoogleChrome8HistoryPlugin(BaseGoogleChromeHistoryPlugin):
  """SQLite parser plugin for Google Chrome 8 - 25 history database files."""
  NAME = 'chrome_8_history'
  DATA_FORMAT = 'Google Chrome 8 - 25 history SQLite database file'
  # Tables and columns that must be present in a database for this plugin to
  # be considered compatible with it.
  REQUIRED_STRUCTURE = {
      'downloads': frozenset([
          'id', 'full_path', 'received_bytes', 'total_bytes', 'url',
          'start_time']),
      'urls': frozenset([
          'id', 'url', 'title', 'visit_count', 'typed_count',
          'last_visit_time', 'hidden']),
      'visits': frozenset([
          'visit_time', 'from_visit', 'transition', 'id'])}
  # Pairs of (SQL query, name of the callback method that is invoked once per
  # result row).
  QUERIES = [
      (('SELECT urls.id, urls.url, urls.title, urls.visit_count, '
        'urls.typed_count, urls.last_visit_time, urls.hidden, visits.'
        'visit_time, visits.from_visit, visits.transition, visits.id '
        'AS visit_id FROM urls, visits WHERE urls.id = visits.url ORDER '
        'BY visits.visit_time'), 'ParseLastVisitedRow'),
      (('SELECT id, full_path, url, start_time, received_bytes, '
        'total_bytes FROM downloads'), 'ParseFileDownloadedRow')]
  # Known schema of a Chrome 8 history database.
  _SCHEMA_8 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY '
          'KEY,value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed BOOLEAN)')}
  # Known schema of a Chrome 16 history database; the downloads table gained
  # end_time and opened columns compared to the Chrome 8 schema.
  _SCHEMA_16 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL,end_time INTEGER NOT NULL,opened '
          'INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY '
          'KEY,value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed BOOLEAN)')}
  # Known schema of a Chrome 19 history database; only the meta table
  # definition differs from the Chrome 16 schema.
  _SCHEMA_19 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL,end_time INTEGER NOT NULL,opened '
          'INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
          'value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed BOOLEAN)')}
  # Known schema of a Chrome 20 history database; the visits table gained a
  # visit_duration column compared to the Chrome 19 schema.
  _SCHEMA_20 = {
      'downloads': (
          'CREATE TABLE downloads (id INTEGER PRIMARY KEY,full_path '
          'LONGVARCHAR NOT NULL,url LONGVARCHAR NOT NULL,start_time INTEGER '
          'NOT NULL,received_bytes INTEGER NOT NULL,total_bytes INTEGER NOT '
          'NULL,state INTEGER NOT NULL,end_time INTEGER NOT NULL,opened '
          'INTEGER NOT NULL)'),
      'keyword_search_terms': (
          'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
          'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
          'LONGVARCHAR NOT NULL)'),
      'meta': (
          'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
          'value LONGVARCHAR)'),
      'presentation': (
          'CREATE TABLE presentation(url_id INTEGER PRIMARY KEY,pres_index '
          'INTEGER NOT NULL)'),
      'segment_usage': (
          'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
          'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
          'DEFAULT 0 NOT NULL)'),
      'segments': (
          'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
          'INTEGER NON NULL,pres_index INTEGER DEFAULT -1 NOT NULL)'),
      'urls': (
          'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
          'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
          'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
          'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
      'visit_source': (
          'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
          'NOT NULL)'),
      'visits': (
          'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
          'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
          'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed '
          'BOOLEAN,visit_duration INTEGER DEFAULT 0 NOT NULL)')}
  # Schemas supported by this plugin, used for exact schema matching.
  SCHEMAS = [_SCHEMA_8, _SCHEMA_16, _SCHEMA_19, _SCHEMA_20]
  def ParseFileDownloadedRow(
      self, parser_mediator, query, row, **unused_kwargs):
    """Parses a file downloaded row.
    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
        and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    query_hash = hash(query)
    event_data = ChromeHistoryFileDownloadedEventData()
    event_data.full_path = self._GetRowValue(query_hash, row, 'full_path')
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.received_bytes = self._GetRowValue(
        query_hash, row, 'received_bytes')
    event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    # In Chrome 8 - 25 the downloads start_time column is interpreted as a
    # POSIX timestamp, unlike the WebKit timestamps used elsewhere.
    timestamp = self._GetRowValue(query_hash, row, 'start_time')
    date_time = dfdatetime_posix_time.PosixTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
class GoogleChrome27HistoryPlugin(BaseGoogleChromeHistoryPlugin):
"""SQLite parser plugin for Google Chrome 27+ history database files."""
NAME = 'chrome_27_history'
DATA_FORMAT = 'Google Chrome 27 and later history SQLite database file'
REQUIRED_STRUCTURE = {
'downloads': frozenset([
'id', 'target_path', 'received_bytes', 'total_bytes', 'start_time']),
'downloads_url_chains': frozenset([
'id', 'url']),
'urls': frozenset([
'id', 'url', 'title', 'visit_count', 'typed_count',
'last_visit_time', 'hidden']),
'visits': frozenset([
'visit_time', 'from_visit', 'transition', 'id'])}
QUERIES = [
(('SELECT urls.id, urls.url, urls.title, urls.visit_count, '
'urls.typed_count, urls.last_visit_time, urls.hidden, visits.'
'visit_time, visits.from_visit, visits.transition, visits.id '
'AS visit_id FROM urls, visits WHERE urls.id = visits.url ORDER '
'BY visits.visit_time'), 'ParseLastVisitedRow'),
(('SELECT downloads.id AS id, downloads.start_time,'
'downloads.target_path, downloads_url_chains.url, '
'downloads.received_bytes, downloads.total_bytes FROM downloads,'
' downloads_url_chains WHERE downloads.id = '
'downloads_url_chains.id'), 'ParseFileDownloadedRow')]
_SCHEMA_27 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,is_indexed '
'BOOLEAN,visit_duration INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_31 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_37 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL,interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_51 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT '
'NULL,tab_url VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT '
'NULL,http_method VARCHAR NOT NULL,by_ext_id VARCHAR NOT '
'NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_58 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT '
'NULL,tab_url VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT '
'NULL,http_method VARCHAR NOT NULL,by_ext_id VARCHAR NOT '
'NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY,url LONGVARCHAR,title '
'LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT NULL,typed_count '
'INTEGER DEFAULT 0 NOT NULL,last_visit_time INTEGER NOT NULL,hidden '
'INTEGER DEFAULT 0 NOT NULL,favicon_id INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
_SCHEMA_59 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,last_access_time INTEGER NOT NULL,transient INTEGER NOT '
'NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT NULL,tab_url '
'VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT NULL,http_method '
'VARCHAR NOT NULL,by_ext_id VARCHAR NOT NULL,by_ext_name VARCHAR '
'NOT NULL,etag VARCHAR NOT NULL,last_modified VARCHAR NOT '
'NULL,mime_type VARCHAR(255) NOT NULL,original_mime_type '
'VARCHAR(255) NOT NULL)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Chrome 63.0.3239.108 meta.version 37
_SCHEMA_63 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT '
'NULL,tab_url VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT '
'NULL,http_method VARCHAR NOT NULL,by_ext_id VARCHAR NOT '
'NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL, last_access_time '
'INTEGER NOT NULL DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Chrome 65.0.3325.162
_SCHEMA_65 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL,interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL, guid VARCHAR NOT '
'NULL DEFAULT \'\', hash BLOB NOT NULL DEFAULT X\'\', http_method '
'VARCHAR NOT NULL DEFAULT \'\', tab_url VARCHAR NOT NULL '
'DEFAULT \'\', tab_referrer_url VARCHAR NOT NULL DEFAULT \'\', '
'site_url VARCHAR NOT NULL DEFAULT \'\', last_access_time INTEGER '
'NOT NULL DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,PRIMARY KEY '
'(download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Chrome 67.0.3396.62.
_SCHEMA_67 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL, interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL, mime_type VARCHAR(255) NOT '
'NULL DEFAULT "", original_mime_type VARCHAR(255) NOT NULL DEFAULT '
'"", guid VARCHAR NOT NULL DEFAULT \'\', hash BLOB NOT NULL DEFAULT '
'X\'\', http_method VARCHAR NOT NULL DEFAULT \'\', tab_url VARCHAR '
'NOT NULL DEFAULT \'\', tab_referrer_url VARCHAR NOT NULL DEFAULT '
'\'\', site_url VARCHAR NOT NULL DEFAULT \'\', last_access_time '
'INTEGER NOT NULL DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL, finished INTEGER '
'NOT NULL DEFAULT 0,PRIMARY KEY (download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in Linux Chrome 67.0.3396.99 meta.version 39
_SCHEMA_67_2 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,current_path '
'LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT NULL,start_time '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL,total_bytes '
'INTEGER NOT NULL,state INTEGER NOT NULL,danger_type INTEGER NOT '
'NULL,interrupt_reason INTEGER NOT NULL,end_time INTEGER NOT '
'NULL,opened INTEGER NOT NULL,referrer VARCHAR NOT NULL,by_ext_id '
'VARCHAR NOT NULL,by_ext_name VARCHAR NOT NULL,etag VARCHAR NOT '
'NULL,last_modified VARCHAR NOT NULL,mime_type VARCHAR(255) NOT '
'NULL,original_mime_type VARCHAR(255) NOT NULL, guid VARCHAR NOT '
'NULL DEFAULT \'\', hash BLOB NOT NULL DEFAULT X\'\', http_method '
'VARCHAR NOT NULL DEFAULT \'\', tab_url VARCHAR NOT NULL DEFAULT '
'\'\', tab_referrer_url VARCHAR NOT NULL DEFAULT \'\', site_url '
'VARCHAR NOT NULL DEFAULT \'\', last_access_time INTEGER NOT NULL '
'DEFAULT 0, transient INTEGER NOT NULL DEFAULT 0)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL, finished INTEGER '
'NOT NULL DEFAULT 0,PRIMARY KEY (download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE "urls"(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
# Observed in MacOS Chrome 67.0.3396.99 meta.version 39
_SCHEMA_67_3 = {
'downloads': (
'CREATE TABLE downloads (id INTEGER PRIMARY KEY,guid VARCHAR NOT '
'NULL,current_path LONGVARCHAR NOT NULL,target_path LONGVARCHAR NOT '
'NULL,start_time INTEGER NOT NULL,received_bytes INTEGER NOT '
'NULL,total_bytes INTEGER NOT NULL,state INTEGER NOT '
'NULL,danger_type INTEGER NOT NULL,interrupt_reason INTEGER NOT '
'NULL,hash BLOB NOT NULL,end_time INTEGER NOT NULL,opened INTEGER '
'NOT NULL,last_access_time INTEGER NOT NULL,transient INTEGER NOT '
'NULL,referrer VARCHAR NOT NULL,site_url VARCHAR NOT NULL,tab_url '
'VARCHAR NOT NULL,tab_referrer_url VARCHAR NOT NULL,http_method '
'VARCHAR NOT NULL,by_ext_id VARCHAR NOT NULL,by_ext_name VARCHAR '
'NOT NULL,etag VARCHAR NOT NULL,last_modified VARCHAR NOT '
'NULL,mime_type VARCHAR(255) NOT NULL,original_mime_type '
'VARCHAR(255) NOT NULL)'),
'downloads_slices': (
'CREATE TABLE downloads_slices (download_id INTEGER NOT NULL,offset '
'INTEGER NOT NULL,received_bytes INTEGER NOT NULL, finished INTEGER '
'NOT NULL DEFAULT 0,PRIMARY KEY (download_id, offset) )'),
'downloads_url_chains': (
'CREATE TABLE downloads_url_chains (id INTEGER NOT NULL,chain_index '
'INTEGER NOT NULL,url LONGVARCHAR NOT NULL, PRIMARY KEY (id, '
'chain_index) )'),
'keyword_search_terms': (
'CREATE TABLE keyword_search_terms (keyword_id INTEGER NOT '
'NULL,url_id INTEGER NOT NULL,lower_term LONGVARCHAR NOT NULL,term '
'LONGVARCHAR NOT NULL)'),
'meta': (
'CREATE TABLE meta(key LONGVARCHAR NOT NULL UNIQUE PRIMARY KEY, '
'value LONGVARCHAR)'),
'segment_usage': (
'CREATE TABLE segment_usage (id INTEGER PRIMARY KEY,segment_id '
'INTEGER NOT NULL,time_slot INTEGER NOT NULL,visit_count INTEGER '
'DEFAULT 0 NOT NULL)'),
'segments': (
'CREATE TABLE segments (id INTEGER PRIMARY KEY,name VARCHAR,url_id '
'INTEGER NON NULL)'),
'typed_url_sync_metadata': (
'CREATE TABLE typed_url_sync_metadata (storage_key INTEGER PRIMARY '
'KEY NOT NULL,value BLOB)'),
'urls': (
'CREATE TABLE urls(id INTEGER PRIMARY KEY AUTOINCREMENT,url '
'LONGVARCHAR,title LONGVARCHAR,visit_count INTEGER DEFAULT 0 NOT '
'NULL,typed_count INTEGER DEFAULT 0 NOT NULL,last_visit_time '
'INTEGER NOT NULL,hidden INTEGER DEFAULT 0 NOT NULL)'),
'visit_source': (
'CREATE TABLE visit_source(id INTEGER PRIMARY KEY,source INTEGER '
'NOT NULL)'),
'visits': (
'CREATE TABLE visits(id INTEGER PRIMARY KEY,url INTEGER NOT '
'NULL,visit_time INTEGER NOT NULL,from_visit INTEGER,transition '
'INTEGER DEFAULT 0 NOT NULL,segment_id INTEGER,visit_duration '
'INTEGER DEFAULT 0 NOT NULL)')}
SCHEMAS = [
_SCHEMA_27, _SCHEMA_31, _SCHEMA_37, _SCHEMA_51, _SCHEMA_58, _SCHEMA_59,
_SCHEMA_63, _SCHEMA_65, _SCHEMA_67, _SCHEMA_67_2, _SCHEMA_67_3]
def ParseFileDownloadedRow(
        self, parser_mediator, query, row, **unused_kwargs):
    """Parses a file downloaded row.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      query (str): query that created the row.
      row (sqlite3.Row): row.
    """
    # The query hash keys the column-lookup done by _GetRowValue.
    query_hash = hash(query)
    event_data = ChromeHistoryFileDownloadedEventData()
    event_data.full_path = self._GetRowValue(query_hash, row, 'target_path')
    # NOTE(review): 'offset' is filled with the row's `id` column —
    # presumably the downloads-table row identifier; confirm against the
    # event data definition.
    event_data.offset = self._GetRowValue(query_hash, row, 'id')
    event_data.query = query
    event_data.received_bytes = self._GetRowValue(
        query_hash, row, 'received_bytes')
    event_data.total_bytes = self._GetRowValue(query_hash, row, 'total_bytes')
    event_data.url = self._GetRowValue(query_hash, row, 'url')
    # start_time is interpreted as a WebKit timestamp per dfdatetime's
    # WebKitTime (microseconds since 1601-01-01).
    timestamp = self._GetRowValue(query_hash, row, 'start_time')
    date_time = dfdatetime_webkit_time.WebKitTime(timestamp=timestamp)
    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_FILE_DOWNLOADED)
    parser_mediator.ProduceEventWithEventData(event, event_data)
# Register both Chrome history plugin variants with the SQLite parser.
sqlite.SQLiteParser.RegisterPlugins([
    GoogleChrome8HistoryPlugin, GoogleChrome27HistoryPlugin])
|
|
#!/usr/bin/python2
from __future__ import print_function
from argparse import ArgumentParser
import code
import socket
from subprocess import check_call, check_output, STDOUT, call
from time import time, sleep
import re
import json
from threading import Thread
import sys
import traceback
# Agent containers under test: three pools of three machines each
# (main1..main3, secondary1..secondary3, other1..other3).
machines = ['{}{}'.format(pool, idx)
            for pool in ('main', 'secondary', 'other')
            for idx in range(1, 4)]
# State flags an agent can carry (see the `state` subcommand).
states = ['no_panel', 'old_status']
# When True, helper commands narrate what they are doing.
verbose = False
# When True, a failing test prints the full broker dump.
dump_on_failure = True
# Hosts whose /etc/cabsagent.conf was modified and still needs cleanup.
dirty_configs = set()
###############################################################################
# TESTS
###############################################################################
def test_machine_timeout():
    """The broker nulls out a machine's status once it stops heartbeating."""
    # NOTE: these mirror the check-interval and timeout values set in the
    # broker config; keep them in sync.
    check_interval = 5
    timeout_interval = 5
    grace = timeout_interval + check_interval
    stop('main1')
    wait(lambda: dump()['main1']['status'] is None, timeout=grace)
    print('main1 status is null')
    # Bringing the machine back should restore a healthy status.
    start('main1')
    wait_heartbeat('main1')
    assert dump()['main1']['status'] == "Okay"
    print('main1 status is Okay')
def test_pscheck():
    """Exercise the pscheck.py check script with a missing and a present process."""
    cases = [('foobar', 'foobar not found'),  # nonexistent process -> error status
             ('python', 'Okay')]              # existing process -> healthy
    for process, expected in cases:
        write_config('main1', 'Check_Scripts', 'pscheck.py,' + process)
        wait_heartbeat('main1')
        assert dump()['main1']['status'] == expected
        print('main1 status is "{}"'.format(expected))
def test_oldstatus():
    """A machine flagged with the legacy status format still reports Okay."""
    state('main1', 'set', 'oldstatus')
    wait_heartbeat('main1')
    current = dump()['main1']['status']
    # The legacy format prefixes the status, so only the tail is checked.
    assert current.endswith('Okay')
    print('main1 status is ' + current)
def test_nopanel():
    """Walk main1 through the no_panel failure state and back to healthy."""
    def expect_status(expected):
        # Wait for a fresh heartbeat, then assert and report the status.
        wait_heartbeat('main1')
        assert dump()['main1']['status'] == expected
        print("main1 status is '{}'".format(expected))
    write_config('main1', 'Check_Scripts', 'nopanel.py,--debug')
    state('main1', 'set', 'no_panel')
    # With a user logged on, no_panel should be reported...
    logon('main1', 'harry')
    expect_status('no_panel')
    #ts = timestamp('main1')
    # ...and logging off should keep reporting it while triggering a reboot.
    logoff('main1')
    expect_status('no_panel')
    #assert "Failed to talk to init daemon." in logs('main1', ts)
    assert get_state('main1', 'shutdown')
    print("main1 rebooted")
    state('main1', 'unset', 'shutdown')
    # With main1 unhealthy and main2/main3 occupied, new requests should
    # spill over to the secondary pool.
    logon('main2', 'foo')
    logon('main3', 'bar')
    wait_heartbeat('main2', 'main3')
    granted = request('baz', 'main')
    assert granted.startswith('secondary')
    # Clearing the flag brings main1 back to a healthy status.
    logon('main1', 'steve')
    state('main1', 'unset', 'no_panel')
    expect_status('Okay')
def test_restoring():
    """A confirmed user is restored to the same machine; a broken one is not."""
    first = request('fred', 'main')
    second = request('fred', 'main')
    # Unconfirmed requests should hand out distinct machines.
    assert first != second
    logon(first, 'fred')
    wait(lambda: dump()[first]['confirmed'])
    # Once confirmed, the broker should restore the same machine.
    second = request('fred', 'main')
    assert first == second
    # Break the confirmed machine; the broker must stop restoring it.
    write_config(first, 'Check_Scripts', 'nopanel.py,--debug')
    logon(first, 'fred')  # See note in write_config
    state(first, 'set', 'no_panel')
    wait_heartbeat(first)
    second = request('fred', 'main')
    assert first != second
def test_handle_json():
    """The broker should accept a JSON-encoded machine request."""
    granted = request('fred', 'main', json=True)
    assert granted.startswith('main')
def test_external_checks():
    """A passing check script keeps Okay; adding a failing one breaks it."""
    write_config("main1", "Check_Scripts", "example.py")
    wait_heartbeat("main1")
    assert dump()['main1']['status'] == 'Okay'
    print("main1 status is 'Okay'")
    # Append a failing script; the status must change away from Okay.
    write_config("main1", "Check_Scripts", "example.py bad.sh")
    wait_heartbeat("main1")
    current = dump()['main1']['status']
    assert current != 'Okay'
    print("main1 status is '{}'".format(current))
# Tests executed by the `test` subcommand, in this order.
test_funcs = [test_handle_json, test_external_checks, test_nopanel,
              test_oldstatus, test_machine_timeout, test_pscheck,
              test_restoring]
###############################################################################
# UTILS
###############################################################################
def get_state(hostname, state):
    """Return True if the flag file /tmp/<state> exists in the container."""
    # `docker exec ... [ -f path ]` exits 0 exactly when the file exists.
    rc = call(['docker', 'exec', hostname, '[', '-f', '/tmp/' + state, ']'])
    return rc == 0
# We ended up not needing these functions, but I'm leaving them because they might come in
# handy later. See the commented code in test_nopanel for usage.
#def logs(hostname, since=None):
# return check_output(['docker', 'logs'] +
# ([] if not since else ['--since', since]) +
# [hostname], stderr=STDOUT)
#def timestamp(hostname):
# last_line = (check_output(['docker', 'logs', '-t', hostname], stderr=STDOUT)
# .strip().split('\n')[-1])
# return last_line if isinstance(last_line, basestring) else last_line[0]
def setup():
    """Reset every machine to a clean, heartbeating baseline state."""
    clean_configs()
    for machine in machines:
        start(machine)
    baseline = {'status': "Okay", 'confirmed': False, 'user': None}
    def settled():
        # Every machine must report exactly the baseline fields.
        return all(all(info[key] == baseline[key] for key in baseline)
                   for info in dump().values())
    wait(settled)
def clean_configs():
    """Remove agent config overrides written by write_config and restart."""
    global dirty_configs
    for machine in dirty_configs:
        # Drop the override file, then restart so the agent rereads defaults.
        check_call(['docker', 'exec', machine, 'rm', '/etc/cabsagent.conf'])
        restart(machine)
    dirty_configs = set()
def broker_cmd(*args, **kwargs):
    """Send a command to the broker and return the raw response string.

    Keyword args:
        port: broker port to connect to (default 18181).
        json: when True, send args as a JSON list instead of
            colon-delimited fields.
    """
    port = kwargs.get('port', 18181)
    use_json = kwargs.get('json', False)
    if use_json and verbose:
        print('wrapping command in json')
    # Build and send the command line.
    if use_json:
        payload = json.dumps(args)
    else:
        payload = ":".join(args)
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(("localhost", port))
    sock.sendall(payload + "\r\n")
    # Drain the reply until the broker closes the connection.
    chunks = []
    while True:
        buf = sock.recv(1024)
        if not buf:
            break
        chunks.append(buf)
    sock.close()
    return "".join(chunks)
def exists(container):
    """True if a container with this name exists (running or stopped)."""
    out = check_output(['docker', 'ps', '-aqf', 'name={}'.format(container)])
    return bool(out)
def running(container):
    """True if a container with this name is currently running."""
    out = check_output(['docker', 'ps', '-qf', 'name={}'.format(container)])
    return bool(out)
def wait_heartbeat(*hosts):
    """Block until every listed host posts a new heartbeat."""
    # Snapshot the current heartbeat timestamps, then wait until each
    # host's timestamp has moved on from its snapshot.
    before = {host: dump()[host]['last_heartbeat'] for host in hosts}
    def advanced():
        snapshot = dump()
        return all(snapshot[host]['last_heartbeat'] != before[host]
                   for host in hosts)
    wait(advanced)
def wait(func, timeout=10):
    """Poll func about once a second until it returns a truthy value.

    Args:
        func: zero-argument callable; truthy return ends the wait.
        timeout: maximum number of seconds to keep polling.

    Raises:
        AssertionError: if func never returned truthy within timeout.
    """
    end_time = time() + timeout
    while time() < end_time:
        # Check before sleeping so an already-true condition returns
        # immediately instead of always burning a full second first.
        if func():
            return
        sleep(1)
    raise AssertionError("Timed out while waiting for condition")
def remove(hostname):
    """Stop (if running) and delete (if present) the named container."""
    for condition, action in ((running, 'stop'), (exists, 'rm')):
        if condition(hostname):
            check_call(['docker', action, hostname])
###############################################################################
# USER COMMANDS
###############################################################################
def test():
    """Run every test in test_funcs, resetting the cluster between tests.

    Stops at the first failure; on failure, optionally prints the broker
    dump and always cleans up config overrides before returning.
    """
    global verbose
    for t in test_funcs:
        try:
            print(t.__name__.replace('_', ' ').upper())
            print('setting up...')
            setup()
            verbose = True
            try:
                t()
            finally:
                # Bug fix: verbose used to stay True when a test raised,
                # leaking chatty output into cleanup and later commands.
                verbose = False
            print("PASS\n")
        except AssertionError:
            if dump_on_failure:
                print(json.dumps(dump(), indent=2))
            traceback.print_exc()
            clean_configs()
            return
    print("All tests pass.")
    print("cleaning up...")
    setup()
def write_config(hostname=None, key=None, value=None):
    """Append a key/value override to the agent config and restart it.

    NOTE: because the machine is restarted, any logged-in users or set
    states will be cleared.
    """
    global dirty_configs
    global verbose
    if verbose:
        print("setting '{}: {}' on {}".format(key, value, hostname))
    check_call(['docker', 'exec', '--', hostname, 'bash', '-c',
                'echo {}: {} >> /etc/cabsagent.conf'.format(key, value)])
    # Restart quietly, then remember this host needs cleanup later.
    saved, verbose = verbose, False
    restart(hostname)
    verbose = saved
    dirty_configs.add(hostname)
def request(user=None, pool=None, **kwargs):
    """Ask the broker for a machine from pool on behalf of user."""
    granted = broker_cmd("mr", user, "mypassword", pool, **kwargs)
    if verbose:
        print("requested machine from {} for {}. Got {}.".format(
            pool, user, granted))
    return granted
def start(hostname=None, user=None):
    """Start (or create) an agent container and reset it to a known state.

    Args:
        hostname: container to start.
        user: if given, log this user on after the reset; otherwise log off.
    """
    # Build the agent image on first use.
    if 'cabsagent' not in check_output(['docker', 'images']):
        check_call('docker build -t cabsagent ./agent'.split())
    if not running(hostname):
        if exists(hostname):
            check_output(['docker', 'start', hostname])
        else:
            check_output('docker run -v $PWD/agent:/code --network=cabsnet --net-alias {hostname} '
                         '--hostname {hostname} --name {hostname} -d cabsagent'
                         .format(hostname=hostname), shell=True)
    # reset state
    info = dump()[hostname]
    if info['status'] is None:
        # No status yet (e.g. freshly started); wait for a heartbeat first.
        wait_heartbeat(hostname)
        info = dump()[hostname]
    # Unset any state flags that show up in the reported status.
    [state(hostname, 'unset', s) for s in states if s in info['status']]
    if ' : ' in info['status']:
        # ' : ' appears to mark the legacy status format — clear oldstatus.
        state(hostname, 'unset', 'oldstatus')
    if 'no_panel' in info['status'] or \
            (info['user'] is not None and not info['confirmed']):
        # Force a logon to flush no_panel / unconfirmed-user state.
        logon(hostname, 'clear_state')
        wait_heartbeat(hostname)
    if user:
        logon(hostname, user)
    else:
        logoff(hostname)
def stop(hostname=None):
    """Stop one container, or every container when hostname is None.

    Containers are stopped in parallel since `docker stop` blocks for
    several seconds per container.
    """
    m_list = [hostname] if hostname else machines
    threads = []
    for m in m_list:
        if verbose:
            print("stopping {}".format(m))
        # Bug fix: the worker previously closed over the loop variable `m`
        # (late binding), so a thread could stop the wrong machine when
        # stopping several. Pass the command through args instead, the
        # same pattern build() and restart() use.
        t = Thread(target=check_output, args=(['docker', 'stop', m],))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
def logon(hostname=None, user=None):
    """Write user into the container's users file to simulate a logon."""
    if verbose:
        print("logging {} on to {}.".format(user, hostname))
    shell_cmd = 'echo {} > /tmp/users.txt'.format(user)
    check_call(['docker', 'exec', '--', hostname, 'bash', '-c', shell_cmd])
def logoff(hostname=None):
    """Blank the container's users file to simulate logging everyone off."""
    if verbose:
        print("logging off of {}.".format(hostname))
    shell_cmd = 'echo > /tmp/users.txt'
    check_call(['docker', 'exec', '--', hostname, 'bash', '-c', shell_cmd])
def query():
    """Return the broker's verbose machine listing (status port 18183)."""
    return broker_cmd("query", "verbose", port=18183)
def dump():
    """Return the broker's machine table as a dict, keyed by hostname."""
    raw = broker_cmd("dump", port=18183)
    return json.loads(raw)
def build():
    """Rebuild the agent image and remove all old containers in parallel."""
    check_call('docker build -t cabsagent ./agent'.split())
    print("removing old containers...")
    workers = [Thread(target=remove, args=(m,)) for m in machines]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def state(hostname=None, action=None, state=None):
    """Set or unset a state flag file (/tmp/<state>) inside a container."""
    if verbose:
        print("{}ting state {} on {}.".format(action, state, hostname))
    # 'set' creates the flag file; anything else removes it.
    verb = "touch" if action == 'set' else 'rm -f'
    check_call('docker exec {} {} /tmp/{}'.format(hostname, verb, state).split())
def restart(hostname=None, hard=None):
    """Restart one agent (or all), optionally removing the container first."""
    targets = [hostname] if hostname else machines
    def cycle(machine):
        if hard:
            # A hard restart throws the container away; start() recreates it.
            remove(machine)
        elif not running(machine):
            # Nothing to restart.
            return
        else:
            stop(machine)
        start(machine)
    # Restart the machines in parallel.
    workers = [Thread(target=cycle, args=(m,)) for m in targets]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
if __name__ == "__main__":
    # Each subcommand maps directly onto one of the functions above via
    # set_defaults(func=...); positional argument names match the function
    # parameter names so the parsed namespace can be splatted straight in.
    parser = ArgumentParser()
    sub = parser.add_subparsers()
    p = sub.add_parser("request", help="request a machine")
    p.add_argument("user")
    p.add_argument("pool")
    p.set_defaults(func=request)
    p = sub.add_parser("start", help="start a machine")
    p.add_argument("hostname")
    p.add_argument("user", nargs='?')
    p.set_defaults(func=start)
    p = sub.add_parser("stop", help="stop a machine")
    p.add_argument("hostname", nargs='?')
    p.set_defaults(func=stop)
    p = sub.add_parser("logon", help="log a user on to a machine")
    p.add_argument("hostname")
    p.add_argument("user")
    p.set_defaults(func=logon)
    p = sub.add_parser("logoff", help="log a user off a machine")
    p.add_argument("hostname")
    p.set_defaults(func=logoff)
    p = sub.add_parser("query", help="print current machine info")
    p.set_defaults(func=query)
    p = sub.add_parser("dump", help="print current machine info")
    p.set_defaults(func=dump)
    p = sub.add_parser("test", help="test the broker")
    p.set_defaults(func=test)
    p = sub.add_parser("build", help="build the agent docker image")
    p.set_defaults(func=build)
    p = sub.add_parser("restart", help="restart the cabs agent on a machine")
    p.add_argument("hostname", nargs='?')
    p.add_argument("--hard", "-f", action='store_true', help="remove the container")
    p.set_defaults(func=restart)
    p = sub.add_parser("state", help="set the state of an agent")
    p.add_argument("hostname")
    p.add_argument("action", choices=['set', 'unset'])
    p.add_argument("state", choices=states)
    p.set_defaults(func=state)
    p = sub.add_parser("config", help="set a config option of an agent")
    p.add_argument("hostname")
    p.add_argument("key")
    p.add_argument("value")
    p.set_defaults(func=write_config)
    # Call the selected function with the remaining CLI args as kwargs.
    args = vars(parser.parse_args())
    func = args['func']
    args.pop('func')
    response = func(**args)
    if response:
        # Pretty-print structured responses; print anything else verbatim.
        if any(isinstance(response, t) for t in (dict, list)):
            print(json.dumps(response, indent=2))
        else:
            print(response)
|
|
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
from openstack_horizon.api import base
from openstack_horizon.api import fwaas
from openstack_horizon.api import lbaas
from openstack_horizon.api import neutron
from openstack_horizon.api import vpn
from openstack_horizon.test.test_data import utils
def data(TEST):
# Data returned by openstack_horizon.api.neutron wrapper.
TEST.agents = utils.TestDataContainer()
TEST.networks = utils.TestDataContainer()
TEST.subnets = utils.TestDataContainer()
TEST.ports = utils.TestDataContainer()
TEST.routers = utils.TestDataContainer()
TEST.routers_with_rules = utils.TestDataContainer()
TEST.q_floating_ips = utils.TestDataContainer()
TEST.q_secgroups = utils.TestDataContainer()
TEST.q_secgroup_rules = utils.TestDataContainer()
TEST.providers = utils.TestDataContainer()
TEST.pools = utils.TestDataContainer()
TEST.vips = utils.TestDataContainer()
TEST.members = utils.TestDataContainer()
TEST.monitors = utils.TestDataContainer()
TEST.neutron_quotas = utils.TestDataContainer()
TEST.net_profiles = utils.TestDataContainer()
TEST.policy_profiles = utils.TestDataContainer()
TEST.network_profile_binding = utils.TestDataContainer()
TEST.policy_profile_binding = utils.TestDataContainer()
TEST.vpnservices = utils.TestDataContainer()
TEST.ikepolicies = utils.TestDataContainer()
TEST.ipsecpolicies = utils.TestDataContainer()
TEST.ipsecsiteconnections = utils.TestDataContainer()
TEST.firewalls = utils.TestDataContainer()
TEST.fw_policies = utils.TestDataContainer()
TEST.fw_rules = utils.TestDataContainer()
# Data return by neutronclient.
TEST.api_agents = utils.TestDataContainer()
TEST.api_networks = utils.TestDataContainer()
TEST.api_subnets = utils.TestDataContainer()
TEST.api_ports = utils.TestDataContainer()
TEST.api_routers = utils.TestDataContainer()
TEST.api_q_floating_ips = utils.TestDataContainer()
TEST.api_q_secgroups = utils.TestDataContainer()
TEST.api_q_secgroup_rules = utils.TestDataContainer()
TEST.api_pools = utils.TestDataContainer()
TEST.api_vips = utils.TestDataContainer()
TEST.api_members = utils.TestDataContainer()
TEST.api_monitors = utils.TestDataContainer()
TEST.api_extensions = utils.TestDataContainer()
TEST.api_net_profiles = utils.TestDataContainer()
TEST.api_policy_profiles = utils.TestDataContainer()
TEST.api_network_profile_binding = utils.TestDataContainer()
TEST.api_policy_profile_binding = utils.TestDataContainer()
TEST.api_vpnservices = utils.TestDataContainer()
TEST.api_ikepolicies = utils.TestDataContainer()
TEST.api_ipsecpolicies = utils.TestDataContainer()
TEST.api_ipsecsiteconnections = utils.TestDataContainer()
TEST.api_firewalls = utils.TestDataContainer()
TEST.api_fw_policies = utils.TestDataContainer()
TEST.api_fw_rules = utils.TestDataContainer()
# 1st network.
network_dict = {'admin_state_up': True,
'id': '82288d84-e0a5-42ac-95be-e6af08727e42',
'name': 'net1',
'status': 'ACTIVE',
'subnets': ['e8abc972-eb0c-41f1-9edd-4bc6e3bcd8c9'],
'tenant_id': '1',
'router:external': False,
'shared': False}
subnet_dict = {'allocation_pools': [{'end': '10.0.0.254',
'start': '10.0.0.2'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': '10.0.0.0/24',
'enable_dhcp': True,
'gateway_ip': '10.0.0.1',
'id': network_dict['subnets'][0],
'ip_version': 4,
'name': 'mysubnet1',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# Network profile for network when using the cisco n1k plugin.
net_profile_dict = {'name': 'net_profile_test1',
'segment_type': 'vlan',
'physical_network': 'phys1',
'segment_range': '3000-3100',
'id':
'00000000-1111-1111-1111-000000000000',
'project': TEST.networks.get(name="net1")['tenant_id'],
# vlan profiles have no sub_type or multicast_ip_range
'multicast_ip_range': None,
'sub_type': None}
TEST.api_net_profiles.add(net_profile_dict)
TEST.net_profiles.add(neutron.Profile(net_profile_dict))
# Policy profile for port when using the cisco n1k plugin.
policy_profile_dict = {'name': 'policy_profile_test1',
'id':
'00000000-9999-9999-9999-000000000000'}
TEST.api_policy_profiles.add(policy_profile_dict)
TEST.policy_profiles.add(neutron.Profile(policy_profile_dict))
# Network profile binding.
network_profile_binding_dict = {'profile_id':
'00000000-1111-1111-1111-000000000000',
'tenant_id': network_dict['tenant_id']}
TEST.api_network_profile_binding.add(network_profile_binding_dict)
TEST.network_profile_binding.add(neutron.Profile(
network_profile_binding_dict))
# Policy profile binding.
policy_profile_binding_dict = {'profile_id':
'00000000-9999-9999-9999-000000000000',
'tenant_id': network_dict['tenant_id']}
TEST.api_policy_profile_binding.add(policy_profile_binding_dict)
TEST.policy_profile_binding.add(neutron.Profile(
policy_profile_binding_dict))
# Ports on 1st network.
port_dict = {'admin_state_up': True,
'device_id': 'af75c8e5-a1cc-4567-8d04-44fcd6922890',
'device_owner': 'network:dhcp',
'fixed_ips': [{'ip_address': '10.0.0.3',
'subnet_id': subnet_dict['id']}],
'id': '063cf7f3-ded1-4297-bc4c-31eae876cc91',
'mac_address': 'fa:16:3e:9c:d5:7e',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id']}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
port_dict = {'admin_state_up': True,
'device_id': '1',
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': '10.0.0.4',
'subnet_id': subnet_dict['id']}],
'id': '7e6ce62c-7ea2-44f8-b6b4-769af90a8406',
'mac_address': 'fa:16:3e:9d:e6:2f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id']}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
assoc_port = port_dict
port_dict = {'admin_state_up': True,
'device_id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
'device_owner': 'network:router_interface',
'fixed_ips': [{'ip_address': '10.0.0.1',
'subnet_id': subnet_dict['id']}],
'id': '9036eedb-e7fa-458e-bc6e-d9d06d9d1bc4',
'mac_address': 'fa:16:3e:9c:d5:7f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id']}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
# 2nd network.
network_dict = {'admin_state_up': True,
'id': '72c3ab6c-c80f-4341-9dc5-210fa31ac6c2',
'name': 'net2',
'status': 'ACTIVE',
'subnets': ['3f7c5d79-ee55-47b0-9213-8e669fb03009'],
'tenant_id': '2',
'router:external': False,
'shared': True}
subnet_dict = {'allocation_pools': [{'end': '172.16.88.254',
'start': '172.16.88.2'}],
'dns_nameservers': ['10.56.1.20', '10.56.1.21'],
'host_routes': [{'destination': '192.168.20.0/24',
'nexthop': '172.16.88.253'},
{'destination': '192.168.21.0/24',
'nexthop': '172.16.88.252'}],
'cidr': '172.16.88.0/24',
'enable_dhcp': True,
'gateway_ip': '172.16.88.1',
'id': '3f7c5d79-ee55-47b0-9213-8e669fb03009',
'ip_version': 4,
'name': 'aaaa',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
port_dict = {'admin_state_up': True,
'device_id': '2',
'device_owner': 'compute:nova',
'fixed_ips': [{'ip_address': '172.16.88.3',
'subnet_id': subnet_dict['id']}],
'id': '1db2cc37-3553-43fa-b7e2-3fc4eb4f9905',
'mac_address': 'fa:16:3e:56:e6:2f',
'name': '',
'network_id': network_dict['id'],
'status': 'ACTIVE',
'tenant_id': network_dict['tenant_id']}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
# External network.
network_dict = {'admin_state_up': True,
'id': '9b466b94-213a-4cda-badf-72c102a874da',
'name': 'ext_net',
'status': 'ACTIVE',
'subnets': ['d6bdc71c-7566-4d32-b3ff-36441ce746e8'],
'tenant_id': '3',
'router:external': True,
'shared': False}
subnet_dict = {'allocation_pools': [{'start': '172.24.4.226.',
'end': '172.24.4.238'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': '172.24.4.0/28',
'enable_dhcp': False,
'gateway_ip': '172.24.4.225',
'id': 'd6bdc71c-7566-4d32-b3ff-36441ce746e8',
'ip_version': 4,
'name': 'ext_subnet',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id']}
ext_net = network_dict
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# 1st v6 network.
network_dict = {'admin_state_up': True,
'id': '96688ea1-ffa5-78ec-22ca-33aaabfaf775',
'name': 'v6_net1',
'status': 'ACTIVE',
'subnets': ['88ddd443-4377-ab1f-87dd-4bc4a662dbb6'],
'tenant_id': '1',
'router:external': False,
'shared': False}
subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
'start': 'ff09::02'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': 'ff09::/64',
'enable_dhcp': True,
'gateway_ip': 'ff09::1',
'id': network_dict['subnets'][0],
'ip_version': 6,
'name': 'v6_subnet1',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id'],
'ipv6_modes': 'none/none'}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# 2nd v6 network - slaac.
network_dict = {'admin_state_up': True,
'id': 'c62e4bb3-296a-4cd1-8f6b-aaa7a0092326',
'name': 'v6_net2',
'status': 'ACTIVE',
'subnets': ['5d736a21-0036-4779-8f8b-eed5f98077ec'],
'tenant_id': '1',
'router:external': False,
'shared': False}
subnet_dict = {'allocation_pools': [{'end': 'ff09::ff',
'start': 'ff09::02'}],
'dns_nameservers': [],
'host_routes': [],
'cidr': 'ff09::/64',
'enable_dhcp': True,
'gateway_ip': 'ff09::1',
'id': network_dict['subnets'][0],
'ip_version': 6,
'name': 'v6_subnet2',
'network_id': network_dict['id'],
'tenant_id': network_dict['tenant_id'],
'ipv6_modes': 'slaac/slaac'}
TEST.api_networks.add(network_dict)
TEST.api_subnets.add(subnet_dict)
network = copy.deepcopy(network_dict)
subnet = neutron.Subnet(subnet_dict)
network['subnets'] = [subnet]
TEST.networks.add(neutron.Network(network))
TEST.subnets.add(subnet)
# Set up router data.
port_dict = {'admin_state_up': True,
'device_id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
'device_owner': 'network:router_gateway',
'fixed_ips': [{'ip_address': '10.0.0.3',
'subnet_id': subnet_dict['id']}],
'id': '44ec6726-4bdc-48c5-94d4-df8d1fbf613b',
'mac_address': 'fa:16:3e:9c:d5:7e',
'name': '',
'network_id': TEST.networks.get(name="ext_net")['id'],
'status': 'ACTIVE',
'tenant_id': '1'}
TEST.api_ports.add(port_dict)
TEST.ports.add(neutron.Port(port_dict))
router_dict = {'id': '279989f7-54bb-41d9-ba42-0d61f12fda61',
'name': 'router1',
'status': 'ACTIVE',
'admin_state_up': True,
'distributed': True,
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1'}
TEST.api_routers.add(router_dict)
TEST.routers.add(neutron.Router(router_dict))
router_dict = {'id': '10e3dc42-1ce1-4d48-87cf-7fc333055d6c',
'name': 'router2',
'status': 'ACTIVE',
'admin_state_up': False,
'distributed': False,
'external_gateway_info': None,
'tenant_id': '1'}
TEST.api_routers.add(router_dict)
TEST.routers.add(neutron.Router(router_dict))
router_dict = {'id': '7180cede-bcd8-4334-b19f-f7ef2f331f53',
'name': 'rulerouter',
'status': 'ACTIVE',
'admin_state_up': True,
'distributed': False,
'external_gateway_info':
{'network_id': ext_net['id']},
'tenant_id': '1',
'router_rules': [{'id': '101',
'action': 'deny',
'source': 'any',
'destination': 'any',
'nexthops': []},
{'id': '102',
'action': 'permit',
'source': 'any',
'destination': '8.8.8.8/32',
'nexthops': ['1.0.0.2', '1.0.0.1']}]}
TEST.api_routers.add(router_dict)
TEST.routers_with_rules.add(neutron.Router(router_dict))
# Floating IP.
# Unassociated.
fip_dict = {'tenant_id': '1',
'floating_ip_address': '172.16.88.227',
'floating_network_id': ext_net['id'],
'id': '9012cd70-cfae-4e46-b71e-6a409e9e0063',
'fixed_ip_address': None,
'port_id': None,
'router_id': None}
TEST.api_q_floating_ips.add(fip_dict)
TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))
# Associated (with compute port on 1st network).
fip_dict = {'tenant_id': '1',
'floating_ip_address': '172.16.88.228',
'floating_network_id': ext_net['id'],
'id': 'a97af8f2-3149-4b97-abbd-e49ad19510f7',
'fixed_ip_address': assoc_port['fixed_ips'][0]['ip_address'],
'port_id': assoc_port['id'],
'router_id': router_dict['id']}
TEST.api_q_floating_ips.add(fip_dict)
TEST.q_floating_ips.add(neutron.FloatingIp(fip_dict))
# Security group.
sec_group_1 = {'tenant_id': '1',
'description': 'default',
'id': 'faad7c80-3b62-4440-967c-13808c37131d',
'name': 'default'}
sec_group_2 = {'tenant_id': '1',
'description': 'NotDefault',
'id': '27a5c9a1-bdbb-48ac-833a-2e4b5f54b31d',
'name': 'other_group'}
sec_group_3 = {'tenant_id': '1',
'description': 'NotDefault',
'id': '443a4d7a-4bd2-4474-9a77-02b35c9f8c95',
'name': 'another_group'}
    def add_rule_to_group(secgroup, default_only=True):
        """
        Attach a list of security-group-rule dicts to *secgroup* in place.

        Always appends the two default allow-all egress rules (IPv4 and
        IPv6).  When default_only is False, four extra sample rules are
        prepended: ingress tcp/80, ingress icmp, an ingress rule whose
        remote is sec_group_1 (from the enclosing scope), and an egress
        all-tcp rule.  Each rule gets a fresh random UUID as its id.
        """
        # Default egress rule (IPv4): all fields None except direction,
        # ethertype and the owning group/tenant.
        rule_egress_ipv4 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv4',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        # Default egress rule (IPv6).
        rule_egress_ipv6 = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv6',
            'port_range_min': None, 'port_range_max': None,
            'protocol': None, 'remote_group_id': None,
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        # Sample ingress rule: HTTP from anywhere.
        rule_tcp_80 = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        # Sample ingress rule: ICMP types 5-8 from anywhere
        # (for ICMP the port range fields carry type/code).
        rule_icmp = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 5, 'port_range_max': 8,
            'protocol': u'icmp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/0',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        # Sample ingress rule keyed on a remote security group
        # rather than a CIDR prefix.
        rule_group = {
            'id': str(uuid.uuid4()),
            'direction': u'ingress', 'ethertype': u'IPv4',
            'port_range_min': 80, 'port_range_max': 80,
            'protocol': u'tcp', 'remote_group_id': sec_group_1['id'],
            'remote_ip_prefix': None,
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        # Sample egress rule: full TCP port range to a /24.
        rule_all_tcp = {
            'id': str(uuid.uuid4()),
            'direction': u'egress', 'ethertype': u'IPv4',
            'port_range_min': 1, 'port_range_max': 65535,
            'protocol': u'tcp', 'remote_group_id': None,
            'remote_ip_prefix': u'0.0.0.0/24',
            'security_group_id': secgroup['id'],
            'tenant_id': secgroup['tenant_id']}
        rules = []
        if not default_only:
            rules += [rule_tcp_80, rule_icmp, rule_group, rule_all_tcp]
        rules += [rule_egress_ipv4, rule_egress_ipv6]
        secgroup['security_group_rules'] = rules
add_rule_to_group(sec_group_1, default_only=False)
add_rule_to_group(sec_group_2)
add_rule_to_group(sec_group_3)
groups = [sec_group_1, sec_group_2, sec_group_3]
sg_name_dict = dict([(sg['id'], sg['name']) for sg in groups])
for sg in groups:
# Neutron API.
TEST.api_q_secgroups.add(sg)
for rule in sg['security_group_rules']:
TEST.api_q_secgroup_rules.add(copy.copy(rule))
# OpenStack Dashboard internaly API.
TEST.q_secgroups.add(
neutron.SecurityGroup(copy.deepcopy(sg), sg_name_dict))
for rule in sg['security_group_rules']:
TEST.q_secgroup_rules.add(
neutron.SecurityGroupRule(copy.copy(rule), sg_name_dict))
# LBaaS.
# 1st pool.
pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
'tenant_id': '1',
'vip_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'pool1',
'description': 'pool description',
'subnet_id': TEST.subnets.first().id,
'protocol': 'HTTP',
'lb_method': 'ROUND_ROBIN',
'health_monitors': TEST.monitors.list(),
'members': ['78a46e5e-eb1a-418a-88c7-0e3f5968b08'],
'admin_state_up': True,
'status': 'ACTIVE',
'provider': 'haproxy'}
TEST.api_pools.add(pool_dict)
TEST.pools.add(lbaas.Pool(pool_dict))
# 2nd pool.
pool_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d50',
'tenant_id': '1',
'vip_id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
'name': 'pool2',
'description': 'pool description',
'subnet_id': TEST.subnets.first().id,
'protocol': 'HTTPS',
'lb_method': 'ROUND_ROBIN',
'health_monitors': TEST.monitors.list()[0:1],
'members': [],
'status': 'PENDING_CREATE',
'admin_state_up': True}
TEST.api_pools.add(pool_dict)
TEST.pools.add(lbaas.Pool(pool_dict))
# 1st vip.
vip_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'vip1',
'address': '10.0.0.100',
'floatip_address': '',
'other_address': '10.0.0.100',
'description': 'vip description',
'subnet_id': TEST.subnets.first().id,
'port_id': TEST.ports.first().id,
'subnet': TEST.subnets.first().cidr,
'protocol_port': 80,
'protocol': pool_dict['protocol'],
'pool_id': pool_dict['id'],
'session_persistence': {'type': 'APP_COOKIE',
'cookie_name': 'jssessionid'},
'connection_limit': 10,
'admin_state_up': True}
TEST.api_vips.add(vip_dict)
TEST.vips.add(lbaas.Vip(vip_dict))
# 2nd vip.
vip_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
'name': 'vip2',
'address': '10.0.0.110',
'floatip_address': '',
'other_address': '10.0.0.110',
'description': 'vip description',
'subnet_id': TEST.subnets.first().id,
'port_id': TEST.ports.list()[0].id,
'subnet': TEST.subnets.first().cidr,
'protocol_port': 80,
'protocol': pool_dict['protocol'],
'pool_id': pool_dict['id'],
'session_persistence': {'type': 'APP_COOKIE',
'cookie_name': 'jssessionid'},
'connection_limit': 10,
'admin_state_up': True}
TEST.api_vips.add(vip_dict)
TEST.vips.add(lbaas.Vip(vip_dict))
# 1st member.
member_dict = {'id': '78a46e5e-eb1a-418a-88c7-0e3f5968b08',
'tenant_id': '1',
'pool_id': pool_dict['id'],
'address': '10.0.0.11',
'protocol_port': 80,
'weight': 10,
'status': 'ACTIVE',
'admin_state_up': True}
TEST.api_members.add(member_dict)
TEST.members.add(lbaas.Member(member_dict))
# 2nd member.
member_dict = {'id': '41ac1f8d-6d9c-49a4-a1bf-41955e651f91',
'tenant_id': '1',
'pool_id': pool_dict['id'],
'address': '10.0.0.12',
'protocol_port': 80,
'weight': 10,
'status': 'ACTIVE',
'admin_state_up': True}
TEST.api_members.add(member_dict)
TEST.members.add(lbaas.Member(member_dict))
# 1st monitor.
monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff96',
'type': 'http',
'delay': 10,
'timeout': 10,
'max_retries': 10,
'http_method': 'GET',
'url_path': '/',
'expected_codes': '200',
'admin_state_up': True,
"pools": [{"pool_id": TEST.pools.list()[0].id},
{"pool_id": TEST.pools.list()[1].id}],
}
TEST.api_monitors.add(monitor_dict)
TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))
# 2nd monitor.
monitor_dict = {'id': 'd4a0500f-db2b-4cc4-afcf-ec026febff97',
'type': 'ping',
'delay': 10,
'timeout': 10,
'max_retries': 10,
'admin_state_up': True,
'pools': [],
}
TEST.api_monitors.add(monitor_dict)
TEST.monitors.add(lbaas.PoolMonitor(monitor_dict))
# Quotas.
quota_data = {'network': '10',
'subnet': '10',
'port': '50',
'router': '10',
'floatingip': '50',
'security_group': '20',
'security_group_rule': '100',
}
TEST.neutron_quotas.add(base.QuotaSet(quota_data))
# Extensions.
extension_1 = {"name": "security-group",
"alias": "security-group",
"description": "The security groups extension."}
extension_2 = {"name": "Quota management support",
"alias": "quotas",
"description": "Expose functions for quotas management"}
extension_3 = {"name": "Provider network",
"alias": "provider",
"description": "Provider network extension"}
extension_4 = {"name": "Distributed Virtual Router",
"alias": "dvr",
"description":
"Enables configuration of Distributed Virtual Routers."}
extension_5 = {"name": "HA Router extension",
"alias": "l3-ha",
"description": "Add HA capability to routers."}
TEST.api_extensions.add(extension_1)
TEST.api_extensions.add(extension_2)
TEST.api_extensions.add(extension_3)
TEST.api_extensions.add(extension_4)
TEST.api_extensions.add(extension_5)
# 1st agent.
agent_dict = {"binary": "neutron-openvswitch-agent",
"description": None,
"admin_state_up": True,
"heartbeat_timestamp": "2013-07-26 06:51:47",
"alive": True,
"id": "c876ff05-f440-443e-808c-1d34cda3e88a",
"topic": "N/A",
"host": "devstack001",
"agent_type": "Open vSwitch agent",
"started_at": "2013-07-26 05:23:28",
"created_at": "2013-07-26 05:23:28",
"configurations": {"devices": 2}}
TEST.api_agents.add(agent_dict)
TEST.agents.add(neutron.Agent(agent_dict))
# 2nd agent.
agent_dict = {"binary": "neutron-dhcp-agent",
"description": None,
"admin_state_up": True,
"heartbeat_timestamp": "2013-07-26 06:51:48",
"alive": True,
"id": "f0d12e3d-1973-41a2-b977-b95693f9a8aa",
"topic": "dhcp_agent",
"host": "devstack001",
"agent_type": "DHCP agent",
"started_at": "2013-07-26 05:23:30",
"created_at": "2013-07-26 05:23:30",
"configurations": {
"subnets": 1,
"use_namespaces": True,
"dhcp_lease_duration": 120,
"dhcp_driver": "neutron.agent.linux.dhcp.Dnsmasq",
"networks": 1,
"ports": 1}}
TEST.api_agents.add(agent_dict)
TEST.agents.add(neutron.Agent(agent_dict))
# Service providers.
provider_1 = {"service_type": "LOADBALANCER",
"name": "haproxy",
"default": True}
TEST.providers.add(provider_1)
# VPNaaS.
# 1st VPNService.
vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d61',
'tenant_id': '1',
'name': 'cloud_vpn1',
'description': 'vpn description',
'subnet_id': TEST.subnets.first().id,
'router_id': TEST.routers.first().id,
'vpn_type': 'ipsec',
'ipsecsiteconnections': [],
'admin_state_up': True,
'status': 'Active',
'ipsecsiteconns': TEST.ipsecsiteconnections.list()}
TEST.api_vpnservices.add(vpnservice_dict)
TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))
# 2nd VPNService.
vpnservice_dict = {'id': '09a26949-6231-4f72-942a-0c8c0ddd4d62',
'tenant_id': '1',
'name': 'cloud_vpn2',
'description': 'vpn description',
'subnet_id': TEST.subnets.first().id,
'router_id': TEST.routers.first().id,
'vpn_type': 'ipsec',
'ipsecsiteconnections': [],
'admin_state_up': True,
'status': 'Active',
'ipsecsiteconns': []}
TEST.api_vpnservices.add(vpnservice_dict)
TEST.vpnservices.add(vpn.VPNService(vpnservice_dict))
# 1st IKEPolicy
ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c981',
'tenant_id': '1',
'name': 'ikepolicy_1',
'description': 'ikepolicy description',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-256',
'ike_version': 'v1',
'lifetime': {'units': 'seconds', 'value': 3600},
'phase1_negotiation_mode': 'main',
'pfs': 'group5',
'ipsecsiteconns': TEST.ipsecsiteconnections.list()}
TEST.api_ikepolicies.add(ikepolicy_dict)
TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))
# 2nd IKEPolicy
ikepolicy_dict = {'id': 'a1f009b7-0ffa-43a7-ba19-dcabb0b4c982',
'tenant_id': '1',
'name': 'ikepolicy_2',
'description': 'ikepolicy description',
'auth_algorithm': 'sha1',
'encryption_algorithm': 'aes-256',
'ike_version': 'v1',
'lifetime': {'units': 'seconds', 'value': 3600},
'phase1_negotiation_mode': 'main',
'pfs': 'group5',
'ipsecsiteconns': []}
TEST.api_ikepolicies.add(ikepolicy_dict)
TEST.ikepolicies.add(vpn.IKEPolicy(ikepolicy_dict))
# 1st IPSecPolicy
ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb8',
'tenant_id': '1',
'name': 'ipsecpolicy_1',
'description': 'ipsecpolicy description',
'auth_algorithm': 'sha1',
'encapsulation_mode': 'tunnel',
'encryption_algorithm': '3des',
'lifetime': {'units': 'seconds', 'value': 3600},
'pfs': 'group5',
'transform_protocol': 'esp',
'ipsecsiteconns': TEST.ipsecsiteconnections.list()}
TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))
# 2nd IPSecPolicy
ipsecpolicy_dict = {'id': '8376e1dd-2b1c-4346-b23c-6989e75ecdb9',
'tenant_id': '1',
'name': 'ipsecpolicy_2',
'description': 'ipsecpolicy description',
'auth_algorithm': 'sha1',
'encapsulation_mode': 'tunnel',
'encryption_algorithm': '3des',
'lifetime': {'units': 'seconds', 'value': 3600},
'pfs': 'group5',
'transform_protocol': 'esp',
'ipsecsiteconns': []}
TEST.api_ipsecpolicies.add(ipsecpolicy_dict)
TEST.ipsecpolicies.add(vpn.IPSecPolicy(ipsecpolicy_dict))
# 1st IPSecSiteConnection
ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d6',
'tenant_id': '1',
'name': 'ipsec_connection_1',
'description': 'vpn connection description',
'dpd': {'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': ikepolicy_dict['id'],
'initiator': 'bi-directional',
'ipsecpolicy_id': ipsecpolicy_dict['id'],
'mtu': 1500,
'peer_address':
'2607:f0d0:4545:3:200:f8ff:fe21:67cf',
'peer_cidrs': ['20.1.0.0/24', '21.1.0.0/24'],
'peer_id':
'2607:f0d0:4545:3:200:f8ff:fe21:67cf',
'psk': 'secret',
'vpnservice_id': vpnservice_dict['id'],
'admin_state_up': True,
'status': 'Active'}
TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
TEST.ipsecsiteconnections.add(
vpn.IPSecSiteConnection(ipsecsiteconnection_dict))
# 2nd IPSecSiteConnection
ipsecsiteconnection_dict = {'id': 'dd1dd3a0-f349-49be-b013-245e147763d7',
'tenant_id': '1',
'name': 'ipsec_connection_2',
'description': 'vpn connection description',
'dpd': {'action': 'hold',
'interval': 30,
'timeout': 120},
'ikepolicy_id': ikepolicy_dict['id'],
'initiator': 'bi-directional',
'ipsecpolicy_id': ipsecpolicy_dict['id'],
'mtu': 1500,
'peer_address': '172.0.0.2',
'peer_cidrs': ['20.1.0.0/24'],
'peer_id': '172.0.0.2',
'psk': 'secret',
'vpnservice_id': vpnservice_dict['id'],
'admin_state_up': True,
'status': 'Active'}
TEST.api_ipsecsiteconnections.add(ipsecsiteconnection_dict)
TEST.ipsecsiteconnections.add(
vpn.IPSecSiteConnection(ipsecsiteconnection_dict))
# FWaaS
# 1st rule (used by 1st policy)
rule1_dict = {'id': 'f0881d38-c3eb-4fee-9763-12de3338041d',
'tenant_id': '1',
'name': 'rule1',
'description': 'rule1 description',
'protocol': 'tcp',
'action': 'allow',
'source_ip_address': '1.2.3.0/24',
'source_port': '80',
'destination_ip_address': '4.5.6.7/32',
'destination_port': '1:65535',
'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'position': 1,
'shared': True,
'enabled': True}
TEST.api_fw_rules.add(rule1_dict)
rule1 = fwaas.Rule(copy.deepcopy(rule1_dict))
# NOTE: rule1['policy'] is set below
TEST.fw_rules.add(rule1)
# 2nd rule (used by 2nd policy; no name)
rule2_dict = {'id': 'c6298a93-850f-4f64-b78a-959fd4f1e5df',
'tenant_id': '1',
'name': '',
'description': '',
'protocol': 'udp',
'action': 'deny',
'source_ip_address': '1.2.3.0/24',
'source_port': '80',
'destination_ip_address': '4.5.6.7/32',
'destination_port': '1:65535',
'firewall_policy_id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'position': 2,
'shared': True,
'enabled': True}
TEST.api_fw_rules.add(rule2_dict)
rule2 = fwaas.Rule(copy.deepcopy(rule2_dict))
# NOTE: rule2['policy'] is set below
TEST.fw_rules.add(rule2)
# 3rd rule (not used by any policy)
rule3_dict = {'id': 'h0881d38-c3eb-4fee-9763-12de3338041d',
'tenant_id': '1',
'name': 'rule3',
'description': 'rule3 description',
'protocol': None,
'action': 'allow',
'source_ip_address': '1.2.3.0/24',
'source_port': '80',
'destination_ip_address': '4.5.6.7/32',
'destination_port': '1:65535',
'firewall_policy_id': None,
'position': None,
'shared': True,
'enabled': True}
TEST.api_fw_rules.add(rule3_dict)
rule3 = fwaas.Rule(copy.deepcopy(rule3_dict))
# rule3 is not associated with any rules
rule3._apidict['policy'] = None
TEST.fw_rules.add(rule3)
# 1st policy (associated with 2 rules)
policy1_dict = {'id': 'abcdef-c3eb-4fee-9763-12de3338041e',
'tenant_id': '1',
'name': 'policy1',
'description': 'policy with two rules',
'firewall_rules': [rule1_dict['id'], rule2_dict['id']],
'audited': True,
'shared': True}
TEST.api_fw_policies.add(policy1_dict)
policy1 = fwaas.Policy(copy.deepcopy(policy1_dict))
policy1._apidict['rules'] = [rule1, rule2]
TEST.fw_policies.add(policy1)
# Reverse relations (rule -> policy)
rule1._apidict['policy'] = policy1
rule2._apidict['policy'] = policy1
# 2nd policy (associated with no rules; no name)
policy2_dict = {'id': 'cf50b331-787a-4623-825e-da794c918d6a',
'tenant_id': '1',
'name': '',
'description': '',
'firewall_rules': [],
'audited': False,
'shared': False}
TEST.api_fw_policies.add(policy2_dict)
policy2 = fwaas.Policy(copy.deepcopy(policy2_dict))
policy2._apidict['rules'] = []
TEST.fw_policies.add(policy2)
# 1st firewall
fw1_dict = {'id': '8913dde8-4915-4b90-8d3e-b95eeedb0d49',
'tenant_id': '1',
'firewall_policy_id':
'abcdef-c3eb-4fee-9763-12de3338041e',
'name': 'firewall1',
'description': 'firewall description',
'status': 'PENDING_CREATE',
'shared': True,
'admin_state_up': True}
TEST.api_firewalls.add(fw1_dict)
fw1 = fwaas.Firewall(copy.deepcopy(fw1_dict))
fw1._apidict['policy'] = policy1
TEST.firewalls.add(fw1)
# 2nd firewall (no name)
fw2_dict = {'id': '1aa75150-415f-458e-bae5-5a362a4fb1f7',
'tenant_id': '1',
'firewall_policy_id':
'abcdef-c3eb-4fee-9763-12de3338041e',
'name': '',
'description': '',
'status': 'PENDING_CREATE',
'shared': True,
'admin_state_up': True}
TEST.api_firewalls.add(fw1_dict)
fw2 = fwaas.Firewall(copy.deepcopy(fw2_dict))
fw2._apidict['policy'] = policy1
TEST.firewalls.add(fw1)
# Additional Cisco N1K profiles.
# 2nd network profile for network when using the cisco n1k plugin.
# Profile applied on 1st network.
net_profile_dict = {'name': 'net_profile_test2',
'segment_type': 'overlay',
'sub_type': 'native_vxlan',
'segment_range': '10000-10100',
'multicast_ip_range': '144.0.0.0-144.0.0.100',
'id':
'00000000-2222-2222-2222-000000000000',
'project': '1',
# overlay profiles have no physical_network
'physical_network': None}
TEST.api_net_profiles.add(net_profile_dict)
TEST.net_profiles.add(neutron.Profile(net_profile_dict))
# 2nd network profile binding.
network_profile_binding_dict = {'profile_id':
'00000000-2222-2222-2222-000000000000',
'tenant_id': '1'}
TEST.api_network_profile_binding.add(network_profile_binding_dict)
TEST.network_profile_binding.add(neutron.Profile(
network_profile_binding_dict))
# 3rd network profile for network when using the cisco n1k plugin
# Profile applied on 1st network
net_profile_dict = {'name': 'net_profile_test3',
'segment_type': 'overlay',
'sub_type': 'other',
'other_subtype': 'GRE',
'segment_range': '11000-11100',
'id':
'00000000-3333-3333-3333-000000000000',
'project': '1'}
TEST.api_net_profiles.add(net_profile_dict)
TEST.net_profiles.add(neutron.Profile(net_profile_dict))
# 3rd network profile binding
network_profile_binding_dict = {'profile_id':
'00000000-3333-3333-3333-000000000000',
'tenant_id': '1'}
TEST.api_network_profile_binding.add(network_profile_binding_dict)
TEST.network_profile_binding.add(neutron.Profile(
network_profile_binding_dict))
# 4th network profile for network when using the cisco n1k plugin
# Profile applied on 1st network
net_profile_dict = {'name': 'net_profile_test4',
'segment_type': 'trunk',
'sub_type_trunk': 'vlan',
'id':
'00000000-4444-4444-4444-000000000000',
'project': '1'}
TEST.api_net_profiles.add(net_profile_dict)
TEST.net_profiles.add(neutron.Profile(net_profile_dict))
# 4th network profile binding
network_profile_binding_dict = {'profile_id':
'00000000-4444-4444-4444-000000000000',
'tenant_id': '1'}
TEST.api_network_profile_binding.add(network_profile_binding_dict)
TEST.network_profile_binding.add(neutron.Profile(
network_profile_binding_dict))
|
|
# Chicago Tribune News Applications fabfile
# No copying allowed
from fabric.api import *
"""
Base configuration
"""
#name of the deployed site if different from the name of the project
env.site_name = 'boundaries'
env.project_name = 'boundaries'
# NOTE(review): password is blank — confirm it is supplied elsewhere
# before running create_database().
env.database_password = ''
env.site_media_prefix = "site_media"
env.admin_media_prefix = "admin_media"
env.newsapps_media_prefix = "na_media"
# All remote paths below are derived from project_name.
env.path = '/home/newsapps/sites/%(project_name)s' % env
env.log_path = '/home/newsapps/logs/%(project_name)s' % env
env.env_path = '%(path)s/env' % env
env.repo_path = '%(path)s/repository' % env
env.apache_config_path = '/home/newsapps/sites/apache/%(project_name)s' % env
env.python = 'python2.6'
env.repository_url = 'git://github.com/hampelm/Detroit-Boundaryservice.git'
"""
Environments
"""
def production():
    """
    Work on production environment.

    Sets the target host, SSH user and S3 bucket; combine with a branch
    task (stable/master/branch) before running setup() or deploy().
    """
    env.settings = 'production'
    env.hosts = ['ec2-184-72-159-144.compute-1.amazonaws.com']
    env.user = 'newsapps'
    env.s3_bucket = 'boundaries.detroitledger.org'
def staging():
    """
    Work on staging environment.

    NOTE(review): host and bucket are still TODO placeholders — this
    environment cannot be deployed to until they are filled in.
    """
    env.settings = 'staging'
    env.hosts = ['TODO']
    env.user = 'newsapps'
    env.s3_bucket = 'TODO'
"""
Branches
"""
def stable():
    """
    Work on stable branch.
    """
    # Production deploys are expected to use this branch
    # (see _confirm_branch).
    env.branch = 'stable'
def master():
    """
    Work on development branch.
    """
    # _confirm_branch() will prompt if this is deployed to production.
    env.branch = 'master'
def branch(branch_name):
    """
    Work on any specified branch.

    branch_name: name of an existing git branch in the repository.
    """
    env.branch = branch_name
"""
Commands - setup
"""
def setup():
    """
    Setup a fresh virtualenv, install everything we need, and fire up the database.
    Does NOT perform the functions of deploy().

    WARNING: destroys and recreates the project database.
    """
    _confirm_branch()
    require('settings', provided_by=[production, staging])
    require('branch', provided_by=[stable, master, branch])
    setup_directories()
    setup_virtualenv()
    clone_repo()
    checkout_latest()
    install_requirements()
    # Rebuild the database from scratch, then apply schema and init SQL.
    destroy_database()
    create_database()
    sql_init()
    syncdb()
    install_apache_conf()
    deploy_requirements_to_s3()
def setup_directories():
    """
    Create directories necessary for deployment.
    """
    run('mkdir -p %(path)s' % env)
    run('mkdir -p %(env_path)s' % env)
    run ('mkdir -p %(log_path)s;' % env)
    # Log dir must be group-writable by the web server (www-data).
    sudo('chgrp -R www-data %(log_path)s; chmod -R g+w %(log_path)s;' % env)
    # Symlink logs into the project tree for convenience.
    run('ln -s %(log_path)s %(path)s/logs' % env)
def setup_virtualenv():
    """
    Setup a fresh virtualenv.
    """
    run('virtualenv -p %(python)s --no-site-packages %(env_path)s;' % env)
    # Upgrade setuptools and install pip inside the new environment.
    run('source %(env_path)s/bin/activate; easy_install -U setuptools; easy_install pip;' % env)
def clone_repo():
    """
    Do initial clone of the git repository.
    """
    run('git clone %(repository_url)s %(repo_path)s' % env)
def checkout_latest():
    """
    Pull the latest code on the specified branch.
    """
    with cd(env.repo_path):
        if env.branch != 'master':
            # warn_only: the local tracking branch may already exist,
            # in which case 'git checkout -b' fails harmlessly.
            with settings(warn_only=True):
                run('git checkout -b %(branch)s origin/%(branch)s' % env)
        run('git checkout %(branch)s;' % env)
        run('git pull origin %(branch)s' % env)
def install_requirements():
    """
    Install the required packages using pip.
    """
    # NOTE(review): 'pip install -E' only exists in very old pip releases;
    # verify the remote pip version before relying on this.
    run('source %(env_path)s/bin/activate; pip install -E %(env_path)s -r %(repo_path)s/requirements.txt' % env)
def install_apache_conf():
    """
    Install the apache site config file.
    """
    # Installs both the main site config and the API site config
    # (the latter under '<apache_config_path>_api').
    sudo('cp %(repo_path)s/%(project_name)s/configs/%(settings)s/apache %(apache_config_path)s' % env)
    sudo('cp %(repo_path)s/%(project_name)s/configs_api/%(settings)s/apache %(apache_config_path)s_api' % env)
def deploy_requirements_to_s3():
    """
    Deploy the latest newsapps and admin media to s3.
    """
    # Delete-then-sync for the Django admin media...
    run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s/%(admin_media_prefix)s/' % env)
    run('s3cmd -P --guess-mime-type sync %(env_path)s/src/django/django/contrib/admin/media/ s3://%(s3_bucket)s/%(project_name)s/%(admin_media_prefix)s/' % env)
    # ...and for the shared newsapps media.
    run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s/%(newsapps_media_prefix)s/' % env)
    run('s3cmd -P --guess-mime-type sync %(env_path)s/src/newsapps/newsapps/na_media/ s3://%(s3_bucket)s/%(project_name)s/%(newsapps_media_prefix)s/' % env)
"""
Commands - deployment
"""
def deploy():
    """
    Deploy the latest version of the site to the server and restart Apache2.
    Does not perform the functions of load_new_data().
    """
    _confirm_branch()
    require('settings', provided_by=[production, staging])
    require('branch', provided_by=[stable, master, branch])
    # warn_only so a failing step leaves the maintenance page up
    # instead of aborting the whole fab run.
    with settings(warn_only=True):
        maintenance_up()
        checkout_latest()
        gzip_assets()
        deploy_to_s3()
        maintenance_down()
def maintenance_up():
    """
    Install the Apache maintenance configuration.
    """
    # Swap both the main and API vhosts to the maintenance page,
    # then restart Apache to pick them up.
    sudo('cp %(repo_path)s/%(project_name)s/configs/%(settings)s/apache_maintenance %(apache_config_path)s' % env)
    sudo('cp %(repo_path)s/%(project_name)s/configs_api/%(settings)s/apache_maintenance %(apache_config_path)s_api' % env)
    reboot()
def gzip_assets():
    """
    GZips every file in the assets directory and places the new file
    in the gzip directory with the same filename.
    """
    run('cd %(repo_path)s; python gzip_assets.py' % env)
def deploy_to_s3():
    """
    Deploy the latest project site media to S3.
    """
    env.gzip_path = '%(path)s/repository/%(project_name)s/gzip/assets/' % env
    # Files were pre-gzipped by gzip_assets(); serve them with a
    # Content-encoding header so browsers decompress transparently.
    run(('s3cmd -P --add-header=Content-encoding:gzip --guess-mime-type --rexclude-from=%(path)s/repository/s3exclude sync %(gzip_path)s s3://%(s3_bucket)s/%(project_name)s/%(site_media_prefix)s/') % env)
def reboot():
    """
    Restart the Apache2 server.
    """
    sudo('/mnt/apps/bin/restart-all-apache.sh')
def maintenance_down():
    """
    Reinstall the normal site configuration.
    """
    # Restores the regular vhost configs and restarts Apache.
    install_apache_conf()
    reboot()
"""
Commands - data
"""
def load_new_data():
    """
    Erase the current database and load new data from the SQL dump file.

    Intentionally a no-op: only prints instructions, because SQL dumps
    are too slow for this dataset.
    """
    require('settings', provided_by=[production, staging])
    print 'Loading data from SQL dumps is not efficient enough for a dataset of this size.'
    print 'Instead, SSH in and use ./manage load_shapefiles.'
def create_database(cmd=run):
    """
    Creates the user and database for this project.

    cmd: fabric executor — run (remote, default) or local
        (see local_bootstrap).
    """
    cmd('echo "CREATE USER %(project_name)s WITH PASSWORD \'%(database_password)s\';" | psql postgres' % env)
    # Database is owned by the project user and based on the PostGIS template.
    cmd('createdb -O %(project_name)s %(project_name)s -T template_postgis' % env)
def sql_init():
    """
    Run the repository's finish_init.sql script against the project database.
    """
    run('psql -q %(project_name)s < %(path)s/repository/data/psql/finish_init.sql' % env)
def destroy_database(cmd=run):
    """
    Destroys the user and database for this project.
    Will not cause the fab to fail if they do not exist.

    cmd: fabric executor — run (remote, default) or local.
    """
    # warn_only: dropdb/dropuser fail when the targets don't exist yet.
    with settings(warn_only=True):
        cmd('dropdb %(project_name)s' % env)
        cmd('dropuser %(project_name)s' % env)
def syncdb():
    """
    Sync/migrate the Django schema on the remote host.

    BUG FIX: each fabric run() spawns a fresh shell, so the previous
    standalone run('export DEPLOYMENT_TARGET=production') never reached
    the subsequent ./manage invocations.  The export now shares a
    command line with each manage call so it actually takes effect.
    """
    with cd(env.repo_path):
        run('export DEPLOYMENT_TARGET=production; echo $DEPLOYMENT_TARGET; '
            'source %(env_path)s/bin/activate; ./manage syncdb --noinput --all' % env)
        run('export DEPLOYMENT_TARGET=production; '
            'source %(env_path)s/bin/activate; ./manage migrate --fake' % env)
def pgpool_down():
    """
    Stop pgpool so that it won't prevent the database from being rebuilt.
    """
    sudo('/etc/init.d/pgpool stop')
def pgpool_up():
    """
    Start pgpool.
    """
    sudo('/etc/init.d/pgpool start')
"""
Commands - local
"""
def local_bootstrap():
    """
    Rebuild the database and Django schema on the local machine.

    Reuses destroy_database/create_database with the local executor
    instead of the remote run.
    """
    destroy_database(local)
    create_database(local)
    local('./manage syncdb --all --noinput')
    local('./manage migrate --all --fake')
    print 'Ready.\nNow run "./manage load_shapefiles".'
"""
Commands - miscellaneous
"""
def clear_cache():
    """
    Restart memcache, wiping the current cache.
    """
    sudo('/mnt/apps/bin/restart-memcache.sh')
def echo_host():
    """
    Echo the current host to the command line.
    """
    run('echo %(settings)s; echo %(hosts)s' % env)
"""
Deaths, destroyers of worlds
"""
def shiva_the_destroyer():
    """
    Remove all directories, databases, etc. associated with the application.

    DESTRUCTIVE and irreversible: deletes code, logs, database, user,
    Apache configs and all S3 media for the selected environment.
    """
    with settings(warn_only=True):
        run('rm -Rf %(path)s' % env)
        run('rm -Rf %(log_path)s' % env)
        # pgpool must be stopped or it keeps connections open and
        # blocks dropdb.
        pgpool_down()
        run('dropdb %(project_name)s' % env)
        run('dropuser %(project_name)s' % env)
        pgpool_up()
        sudo('rm %(apache_config_path)s' % env)
        sudo('rm %(apache_config_path)s_api' % env)
        reboot()
        run('s3cmd del --recursive s3://%(s3_bucket)s/%(project_name)s' % env)
"""
Utility functions (not to be called directly)
"""
def _execute_psql(query):
    """
    Executes a PostgreSQL command using the command line interface.

    SECURITY NOTE(review): query is interpolated directly into a
    double-quoted shell string — only call this with trusted,
    shell-safe SQL.
    """
    env.query = query
    run(('cd %(path)s/repository; psql -q %(project_name)s -c "%(query)s"') % env)
def _confirm_branch():
    """
    Ask for confirmation before deploying a non-stable branch to production.

    Aborts the fab run unless the user explicitly answers yes; the
    default answer ("Not at all") aborts.
    """
    # Guard clause: only the production + non-stable combination needs
    # confirmation.
    if env.settings != 'production' or env.branch == 'stable':
        return
    answer = prompt("You are trying to deploy the '%(branch)s' branch to production.\nYou should really only deploy a stable branch.\nDo you know what you're doing?" % env, default="Not at all")
    accepted = ('y', 'Y', 'yes', 'Yes', 'buzz off', 'screw you')
    if answer not in accepted:
        exit()
|
|
# -*- coding: utf-8 -*-
"""
    Evacuees Registry

    Module-level setup: resolves the current controller/function,
    guards against the module being disabled, and registers the
    custom add_members method for person groups.
"""
module = request.controller
resourcename = request.function
# Refuse to serve requests when the module is disabled in deployment
# settings.
if not settings.has_module(module):
    raise HTTP(404, body="Module disabled: %s" % module)
# Custom REST method used by the group() controller's "Add Members"
# button.
s3db.set_method("pr", "group",
                method = "add_members",
                action = s3db.evr_AddGroupMembers)
# -----------------------------------------------------------------------------
def index():
    """ Module's Home Page

        Serves CMS content if available, otherwise falls back to
        index_alt().
    """
    return s3db.cms_index(module, alt_function="index_alt")
# -----------------------------------------------------------------------------
def index_alt():
    """
        Module homepage for non-Admin users when no CMS content found
    """
    # Just redirect to the list of Cases
    redirect(URL(f="person"))
# -----------------------------------------------------------------------------
def person():
    """
        REST controller to register evacuees

        Restricts the resource to person records with an EVR case,
        configures report options, list fields, filter widgets and a
        custom CRUD form, then delegates to the generic S3 REST
        controller.
    """

    # @todo: this will not allow pre-existing person records
    #        to be added as EVR cases - need a filter+action
    #        solution instead
    # NOTE: this is an S3 query expression (operator overload),
    # not a boolean — it filters for persons that have a case record.
    s3.filter = s3base.S3FieldSelector("case.id") != None

    # Custom Method for Contacts
    s3db.set_method("pr", "person",
                    method = "contacts",
                    action = s3db.pr_contacts)

    def prep(r):
        """ Pre-processor: per-request resource configuration """
        fiscal_code = s3db.evr_case.fiscal_code
        levels = current.gis.get_relevant_hierarchy_levels()
        # Enforce fiscal-code uniqueness on create only; skip the
        # check on update (the record already owns its code).
        if r.method == "update":
            fiscal_code.requires = None
        else:
            fiscal_code.requires = \
                IS_EMPTY_OR(IS_NOT_IN_DB(db(db.evr_case.deleted != True),
                                         fiscal_code),
                            null=""
                            )
        report_fields = ["id",
                         "last_name",
                         "case.organisation_id",
                         "gender",
                         "date_of_birth",
                         "person_details.nationality",
                         "person_details.marital_status",
                         "shelter_registration.shelter_id",
                         "shelter_registration.check_in_date",
                         "shelter_registration.check_out_date",
                         ]
        if settings.get_cr_shelter_housing_unit_management():
            report_fields.append("shelter_registration.shelter_unit_id")
        # One report axis per relevant location hierarchy level.
        for level in levels:
            lfield = "location_id$%s" % level
            report_fields.append(lfield)
        report_options = Storage(
            rows=report_fields,
            cols=report_fields,
            fact=report_fields,
            defaults=Storage(
                rows="shelter_registration.shelter_id",
                cols="gender",
                #totals=True,
                )
            )
        list_fields = ["id",
                       "first_name",
                       #"middle_name",
                       "last_name",
                       "gender",
                       "date_of_birth",
                       ]
        if settings.get_evr_link_to_organisation():
            list_fields.append("case.organisation_id")
        list_fields.append("shelter_registration.shelter_id")
        if settings.get_cr_shelter_housing_unit_management():
            list_fields.append("shelter_registration.shelter_unit_id")
        list_fields.append("shelter_registration.check_in_date")
        list_fields.append("shelter_registration.check_out_date")
        r.resource.configure(list_fields = list_fields,
                             report_options = report_options)

        if r.interactive:
            if not r.component:
                # Main record view: install filters and custom form
                resource = r.resource
                # Filter widgets
                from s3 import S3OptionsFilter, S3TextFilter, S3LocationFilter, S3DateFilter
                filter_widgets = [
                    S3TextFilter(["first_name",
                                  #"middle_name",
                                  "last_name",
                                  #"local_name",
                                  "identity.value",
                                  "case.fiscal_code",
                                  ],
                                 label = T("Name and/or ID"),
                                 comment = T("To search for a person, enter any of the "
                                             "first, middle or last names and/or an ID "
                                             "number of a person, separated by spaces. "
                                             "You may use % as wildcard."),
                                 ),
                    S3LocationFilter("address.location_id",
                                     label = T("Current Residence"),
                                     levels = levels,
                                     ),
                    S3DateFilter("date_of_birth",
                                 label = T("Date Of Birth")
                                 ),
                    S3OptionsFilter("person_details.nationality",
                                    label = T("Nationality"),
                                    ),
                    S3OptionsFilter("case.organisation_id",
                                    label = T("Organisation"),
                                    ),
                    S3OptionsFilter("shelter_registration.shelter_id",
                                    label = T("Shelter"),
                                    ),
                    S3OptionsFilter("shelter_registration.registration_status",
                                    label = T("Registration Status"),
                                    ),
                    ]

                # Custom Form for Persons
                from s3 import S3SQLCustomForm, S3SQLInlineComponent
                crud_form = S3SQLCustomForm("case.organisation_id",
                                            "first_name",
                                            #"middle_name",
                                            "last_name",
                                            "date_of_birth",
                                            "location_id",
                                            "person_details.place_of_birth",
                                            "case.fiscal_code",
                                            S3SQLInlineComponent(
                                                "identity",
                                                label = T("Identity Documents"),
                                                fields = ["type",
                                                          "value",
                                                          ],
                                                ),
                                            "person_details.nationality",
                                            "gender",
                                            "person_details.marital_status",
                                            "person_details.religion",
                                            "person_details.occupation",
                                            #"person_details.company",
                                            "comments",
                                            )
                resource.configure(crud_form = crud_form,
                                   filter_widgets = filter_widgets,
                                   )

            elif r.component_name == "shelter_registration":
                if settings.get_cr_shelter_housing_unit_management():
                    # Dynamically update options for shelter_unit_id
                    # when a shelter_id gets selected
                    from s3 import SEPARATORS
                    options = {"trigger": "shelter_id",
                               "target": "shelter_unit_id",
                               "lookupPrefix": "cr",
                               "lookupResource": "shelter_unit",
                               }
                    s3.jquery_ready.append('''$.filterOptionsS3(%s)''' % \
                                           json.dumps(options,
                                                      separators=SEPARATORS))

        elif r.representation in ("pdf", "xls"):
            # Reduced list fields for document exports
            list_fields = ["id",
                           "first_name",
                           #"middle_name",
                           "last_name",
                           "gender",
                           #"date_of_birth",
                           (T("Age"), "age"),
                           "person_details.nationality",
                           "person_details.religion",
                           (T("Contact"), "contact.value"),
                           (T("Shelter"), "shelter_registration.shelter_id$name")
                           ]
            r.resource.configure(list_fields=list_fields)
        return True
    s3.prep = prep

    return s3_rest_controller("pr", "person",
                              rheader = s3db.evr_rheader)
# -----------------------------------------------------------------------------
def group():
    """
        REST controller to register families and other groups

        Limits visible groups to the configured EVR group types,
        redirects to the member list after creating a group, and adds
        a custom "Add Members" button on the membership tab.
    """
    evr_group_types = settings.get_evr_group_types()
    # Pre-process
    def prep(r):
        resource = r.resource
        if not r.component:
            # Hide system groups and non-EVR group types
            FS = s3base.S3FieldSelector
            query = (FS("system") == False) & \
                    (FS("group_type").belongs(evr_group_types.keys()))
            resource.add_filter(query)
            # Fields to be displayed in the group table
            resource.configure(# Redirect to member list when a new group
                               # has been created
                               create_next = URL(f="group",
                                                 args=["[id]",
                                                       "group_membership"],
                                                 ),
                               list_fields = ["id",
                                              "name",
                                              "description",
                                              "group_type",
                                              "group_membership.person_id",
                                              (T("Contact"), "contact.value")
                                              ],
                               )
            if r.interactive:
                # Override the options for group_type,
                # only show evr_group_types
                resource.table.group_type.requires = IS_IN_SET(evr_group_types,
                                                               zero=None)
        component = r.component
        if component and component.name == "group_membership":
            component.configure(list_fields = ["id",
                                               "group_id$name",
                                               "group_id$description",
                                               "group_id$group_type",
                                               "person_id",
                                               "person_id$date_of_birth",
                                               "group_head"
                                               ],
                                # No embedded add-form
                                listadd = False,
                                )
        return True
    s3.prep = prep
    # Post-process
    def postp(r, output):
        # FIX: removed an unused 'update_url' local that was computed
        # (via URL()) for the main record view but never read.
        if r.interactive:
            component = r.component
            if component and component.name == "group_membership" \
                         and not r.method:
                # Custom add-button that redirects to the add_members
                # action (opens on a separate tab)
                buttons = output.get("buttons", {})
                buttons["add_btn"] = A(T("Add Members"),
                                       _href = r.url(component = "",
                                                     method = "add_members",
                                                     ),
                                       _class = "action-btn add",
                                       )
                output["buttons"] = buttons
        return output
    s3.postp = postp
    output = s3_rest_controller("pr", "group",
                                rheader = s3db.evr_rheader)
    return output
# END =========================================================================
|
|
#!/usr/bin/env python
# @license
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for creating screenshots with Neuroglancer.
The Neuroglancer state may be specified either by a URL or by a path to a JSON
state file.
Rendering requires a web browser. By default, a headless chromedriver is
started in the background. It is also possible to use non-headless chromedriver
or a manually-opened browser.
There are several methods by which the screenshot image may be rendered:
1. The state can be rendered directly as a single frame by Neuroglancer. This
is the simplest and fastest method and works for most states.
2. If the output image size exceeds what Neuroglancer/the browser can support
(usually about 4096x4096), tiled rendering can be used. In this case,
Neuroglancer will render the image as multiple tiles which are assembled
automatically into a single image. This is enabled automatically if the
requested image size exceeds the specified tile dimensions. All normal
functionality is supported, except for the "show_slices" option whereby
cross-section panels are also shown in the 3-d view. Manually-specified
cross sections via the "cross_sections" property are supported, however.
3. If a very large number of 3-d objects are to be rendered, it may be
impossible for Neuroglancer to render them all simultaneously due to memory
limits. The `--segment-shard-size` option may be specified to enable a
special rendering mode in which subsets of the objects are rendered
independently and then combined together into a single image. Depth
information is used to combine the images together. Currently, transparent
rendering of objects is not supported, though. As the final image is
produced incrementally, the state is saved in a `.npz` file, which allows
resuming if the screenshot process is interrupted. To avoid resuming if you
change options, delete the `.npz` file.
Tips:
- The Neuroglancer UI controls are not shown, and in the case of multi-panel
layouts, there is no border between panels. In most cases it is desirable to
capture a single-panel layout.
- The layer side panel and statistics panel, if open, will be closed for the
screenshot.
- The specified image dimensions will be used, rather than the dimensions of
your browser window. This, in combination with the removal of the normal
Neuroglancer UI controls, means that the field of view may differ somewhat.
- The axis lines and volume bounding boxes will be shown if they are enabled in
the Neuroglancer state. If you don't want them in the screenshot, you should
disable them in the Neuroglancer state. You may also use the
`--hide-axis-lines` and `--hide-default-annotations` options. In most cases
it is desirable to hide the axis lines and default annotations.
- The scale bars will be shown if they are enabled in the Neuroglancer state.
If you specify a large image size, you may want to increase the size of the
scale bar, using the `--scale-bar-scale` option.
"""
import argparse
import collections
import contextlib
import copy
import datetime
import itertools
import numbers
import os
import threading
import time
from typing import NamedTuple, Tuple, Callable, Iterator, List, Optional
import PIL
import numpy as np
import neuroglancer
import neuroglancer.cli
import neuroglancer.webdriver
def _get_total_segments(state):
    """Return the combined number of selected segments across all
    segmentation layers of `state`."""
    return sum(
        len(layer.segments) for layer in state.layers
        if isinstance(layer.layer, neuroglancer.SegmentationLayer))
def _should_shard_segments(state, segment_shard_size):
    """Return True when the selected segments exceed one shard's capacity."""
    total_segments = _get_total_segments(state)
    return total_segments > segment_shard_size
def _calculate_num_shards(state, segment_shard_size):
    """Return how many shards of at most `segment_shard_size` segments
    are needed to cover all selected segments (ceiling division)."""
    total_segments = _get_total_segments(state)
    return (total_segments + segment_shard_size - 1) // segment_shard_size
def _get_sharded_states(state, segment_shard_size, reverse_bits):
    """Yield copies of `state`, each restricted to one shard of segments.

    Segments across all segmentation layers are ordered (numerically, or by
    bit-reversed id when `reverse_bits` is set) and partitioned into
    consecutive shards of at most `segment_shard_size` segments: shard `i`
    skips the first ``i * segment_shard_size`` segments overall and retains
    the next ``segment_shard_size``.
    """
    if reverse_bits:
        # Order 64-bit ids by their bit-reversed value to break up the
        # spatial correlation of numerically-adjacent segment ids.
        sort_key = lambda x: int('{:064b}'.format(x)[::-1], 2)
    else:
        sort_key = None
    num_shards = _calculate_num_shards(state, segment_shard_size)
    for shard_i in range(num_shards):
        new_state = copy.deepcopy(state)
        cum_retained = 0
        cum_skipped = segment_shard_size * shard_i
        for layer in new_state.layers:
            if not isinstance(layer.layer, neuroglancer.SegmentationLayer):
                continue
            segments = sorted(layer.segments, key=sort_key)
            num_to_skip = min(cum_skipped, len(segments))
            segments = segments[num_to_skip:]
            # BUG FIX: decrement the remaining skip budget.  The original
            # incremented it (`+=`), which made every shard after the first
            # skip far too many segments in all subsequent layers.
            cum_skipped -= num_to_skip
            num_to_retain = min(segment_shard_size - cum_retained, len(segments))
            cum_retained += num_to_retain
            layer.segments = set(segments[:num_to_retain])
        yield new_state
class TileGenerator:
    """Partitions a (width, height) image into a grid of roughly equal tiles.

    The requested tile shape determines the grid dimensions; the actual tile
    shape is then rebalanced so the tiles cover the image as evenly as
    possible.
    """

    def __init__(self, shape, tile_shape):
        self.shape = tuple(shape)
        requested = tuple(tile_shape)
        # Tiles needed along each axis to cover the image (ceiling division).
        self.tile_grid_shape = tuple(
            -(-self.shape[axis] // requested[axis]) for axis in range(2))
        # Even out the tile size given the chosen grid.
        self.tile_shape = tuple(
            -(-self.shape[axis] // self.tile_grid_shape[axis]) for axis in range(2))
        self.num_tiles = self.tile_grid_shape[0] * self.tile_grid_shape[1]

    def get_tile_states(self, state):
        """Yield (params, state) pairs, one per tile, row-major order.

        Each state is a deep copy of `state` with `partial_viewport` set to
        the tile's fractional rectangle of the full image.
        """
        grid_w, grid_h = self.tile_grid_shape
        tile_w, tile_h = self.tile_shape
        full_w, full_h = self.shape
        for row in range(grid_h):
            for col in range(grid_w):
                x0 = col * tile_w
                y0 = row * tile_h
                # Edge tiles may be clipped to the image bounds.
                width = min(tile_w, full_w - x0)
                height = min(tile_h, full_h - y0)
                tile_state = copy.deepcopy(state)
                tile_state.partial_viewport = [
                    x0 / full_w, y0 / full_h, width / full_w, height / full_h
                ]
                params = {
                    'tile_x': col,
                    'tile_y': row,
                    'x_offset': x0,
                    'y_offset': y0,
                    'tile_width': width,
                    'tile_height': height,
                }
                yield params, tile_state
class ShardedTileGenerator(TileGenerator):
    """TileGenerator that additionally partitions segments into shards.

    One full tile grid is rendered per segment shard, so `num_tiles`
    becomes the tile count multiplied by the number of shards.
    """

    def __init__(self, state, segment_shard_size, reverse_bits, **kwargs):
        super(ShardedTileGenerator, self).__init__(**kwargs)
        self.state = state
        self.reverse_bits = reverse_bits
        self.total_segments = _get_total_segments(self.state)
        self.segment_shard_size = segment_shard_size
        self.num_shards = _calculate_num_shards(self.state, self.segment_shard_size)
        # Every shard renders the whole tile grid.
        self.num_tiles *= self.num_shards

    def get_states(self):
        """Yield (params, state) per (shard, tile); params gains a
        'segment_shard' key identifying the shard index."""
        for shard_i, state in enumerate(
                _get_sharded_states(self.state,
                                    self.segment_shard_size,
                                    reverse_bits=self.reverse_bits)):
            for params, state in self.get_tile_states(state):
                params['segment_shard'] = shard_i
                yield params, state
class CaptureScreenshotRequest(NamedTuple):
    """One unit of screenshot work handed to a viewer worker."""
    # Viewer state to render.
    state: neuroglancer.ViewerState
    # Human-readable label used in progress log messages.
    description: str
    # Invoked with the viewer's ConfigState inside a txn before rendering
    # (e.g. to set viewer_size).
    config_callback: Callable[[neuroglancer.viewer_config_state.ConfigState], None]
    # Invoked with the ScreenshotReply once rendering completes.
    # (Annotation corrected: this is a callback, not the reply itself.)
    response_callback: Callable[[neuroglancer.viewer_config_state.ScreenshotReply], None]
    # Whether to also request the per-pixel depth buffer.
    include_depth: bool = False
def buffered_iterator(base_iter, lock, buffer_size):
    """Yield items from a shared iterator, pulling them in locked batches.

    Several consumers may wrap the same `base_iter`; `lock` guards each
    bulk read so items are handed out to exactly one consumer.
    """
    while True:
        with lock:
            batch = list(itertools.islice(base_iter, buffer_size))
        if not batch:
            return
        yield from batch
def capture_screenshots(viewer: neuroglancer.Viewer,
                        request_iter: Iterator[CaptureScreenshotRequest],
                        refresh_browser_callback: Callable[[], None],
                        refresh_browser_timeout: int,
                        num_to_prefetch: int = 1) -> None:
    """Sequentially render screenshot requests in a single viewer.

    Up to `num_to_prefetch` upcoming states are registered as prefetch
    states so the browser can start loading chunks early.  If no loading
    statistics arrive for `refresh_browser_timeout` seconds,
    `refresh_browser_callback` is invoked (typically reloading the browser)
    and waiting resumes.
    """
    prefetch_buffer = list(itertools.islice(request_iter, num_to_prefetch + 1))
    while prefetch_buffer:
        with viewer.config_state.txn() as s:
            s.show_ui_controls = False
            s.show_panel_borders = False
            del s.prefetch[:]
            # Earlier entries in the buffer get higher prefetch priority.
            for i, request in enumerate(prefetch_buffer[1:]):
                s.prefetch.append(
                    neuroglancer.PrefetchState(state=request.state,
                                               priority=num_to_prefetch - i))
            request = prefetch_buffer[0]
            request.config_callback(s)
        viewer.set_state(request.state)
        # BUG FIX: timestamp format was '%H:%M%S' (missing colon between
        # minutes and seconds); corrected to '%H:%M:%S' here and below.
        print('%s [%s] Requesting screenshot' % (
            datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),
            request.description,
        ))
        last_statistics_time = time.time()

        def statistics_callback(statistics):
            nonlocal last_statistics_time
            # Any statistics update counts as browser liveness.
            last_statistics_time = time.time()
            total = statistics.total
            print(
                '%s [%s] Screenshot in progress: %6d/%6d chunks loaded (%10d bytes), %3d downloading'
                % (
                    datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S.%f'),
                    request.description,
                    total.visible_chunks_gpu_memory,
                    total.visible_chunks_total,
                    total.visible_gpu_memory,
                    total.visible_chunks_downloading,
                ))
        event = threading.Event()
        screenshot = None

        def result_callback(s):
            nonlocal screenshot
            screenshot = s.screenshot
            event.set()
        viewer.async_screenshot(
            result_callback,
            include_depth=request.include_depth,
            statistics_callback=statistics_callback,
        )

        def get_timeout():
            # Seconds remaining before the browser is considered
            # unresponsive (never negative).
            return max(0, last_statistics_time + refresh_browser_timeout - time.time())
        while True:
            if event.wait(get_timeout()):
                break
            if get_timeout() > 0:
                continue
            last_statistics_time = time.time()
            refresh_browser_callback()
        request.response_callback(screenshot)
        # Slide the prefetch window forward by one request.
        del prefetch_buffer[0]
        next_request = next(request_iter, None)
        if next_request is not None:
            prefetch_buffer.append(next_request)
def capture_screenshots_in_parallel(viewers: List[Tuple[neuroglancer.Viewer, Callable[[], None]]],
                                    request_iter: Iterator[CaptureScreenshotRequest],
                                    refresh_browser_timeout: numbers.Number, num_to_prefetch: int,
                                    total_requests: Optional[int] = None,
                                    buffer_size: Optional[int] = None):
    """Distribute screenshot requests over several viewers (one thread each).

    Requests are handed out in batches of `buffer_size` via a shared, locked
    iterator so each request is processed exactly once.  When `buffer_size`
    is not given it is derived from `total_requests` (materializing the
    iterator if that count is unknown).
    """
    if buffer_size is None:
        if total_requests is None:
            # Length unknown: materialize the iterator to count it.
            copy_of_requests = list(request_iter)
            total_requests = len(copy_of_requests)
            request_iter = iter(copy_of_requests)
        # Roughly four batches per viewer, at least one request per batch.
        buffer_size = max(1, total_requests // (len(viewers) * 4))
    request_iter = iter(request_iter)
    threads = []
    buffer_lock = threading.Lock()
    for viewer, refresh_browser_callback in viewers:

        def capture_func(viewer, refresh_browser_callback):
            # Per-thread view onto the shared request stream.
            viewer_request_iter = buffered_iterator(base_iter=request_iter,
                                                    lock=buffer_lock,
                                                    buffer_size=buffer_size)
            capture_screenshots(
                viewer=viewer,
                request_iter=viewer_request_iter,
                num_to_prefetch=num_to_prefetch,
                refresh_browser_timeout=refresh_browser_timeout,
                refresh_browser_callback=refresh_browser_callback,
            )
        # viewer/callback are passed as args (not closed over) so each
        # thread binds its own pair.
        t = threading.Thread(target=capture_func, args=(viewer, refresh_browser_callback))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
class MultiCapturer:
    """Assembles tile (and segment-shard) renders into one output image.

    Tiles are pasted into `image_array` as they arrive; when `include_depth`
    is set, a per-pixel depth buffer combines overlapping renders from
    multiple segment shards.  Progress is periodically checkpointed to
    `<output>.npz` so an interrupted capture can resume.
    """

    def __init__(self,
                 shape,
                 include_depth,
                 output,
                 config_callback,
                 num_to_prefetch,
                 checkpoint_interval=60):
        self.include_depth = include_depth
        self.checkpoint_interval = checkpoint_interval
        self.config_callback = config_callback
        self.num_to_prefetch = num_to_prefetch
        self.output = output
        # Descriptions of tiles already composited (resume bookkeeping).
        self._processed = set()
        self.state_file = output + '.npz'
        self.temp_state_file = self.state_file + '.tmp'
        # RGBA canvas; note numpy arrays are indexed (row=y, col=x).
        self.image_array = np.zeros((shape[1], shape[0], 4), dtype=np.uint8)
        if self.include_depth:
            self.depth_array = np.zeros((shape[1], shape[0]), dtype=np.float32)
        # Resume from a previous checkpoint, if any.
        self._load_state()
        self._add_image_lock = threading.Lock()
        self._last_save_time = time.time()
        # Event is SET while idle, CLEARED while a background save runs.
        self._save_state_in_progress = threading.Event()
        self._save_state_in_progress.set()
        self._num_states_processed = 0
        self._start_time = time.time()

    def _load_state(self):
        """Restore image/depth/processed-set from an existing checkpoint."""
        if not os.path.exists(self.state_file):
            return
        with np.load(self.state_file, allow_pickle=True) as f:
            if self.include_depth:
                self.depth_array = f['depth']
            self.image_array = f['image']
            # 'processed' was saved as a 0-d object array wrapping a set.
            self._processed = set(f['processed'].ravel()[0])

    def _save_state(self, save_image=False):
        """Write a checkpoint atomically (temp file + os.replace)."""
        with self._add_image_lock:
            # Snapshot under the lock so the set is consistent.
            processed = set(self._processed)
        with open(self.temp_state_file, 'wb') as f:
            save_arrays = {
                'image': self.image_array,
                'processed': processed,
            }
            if self.include_depth:
                save_arrays['depth'] = self.depth_array
            np.savez_compressed(f, **save_arrays)
        os.replace(self.temp_state_file, self.state_file)
        if save_image:
            self._save_image()

    def _save_state_async(self, save_image=False):
        """Checkpoint on a background thread; signals completion via the
        `_save_state_in_progress` event.

        NOTE(review): `save_image` is accepted but not forwarded to
        `_save_state()`; the only call site passes False, so behavior is
        unaffected — confirm before relying on it.
        """
        print('Starting checkpointing')

        def func():
            try:
                self._save_state()
                print('Done checkpointing')
            finally:
                self._save_state_in_progress.set()
        threading.Thread(target=func, daemon=True).start()

    def _save_image(self):
        """Write the assembled RGBA canvas to the final output path."""
        im = PIL.Image.fromarray(self.image_array)
        im.save(self.output)

    def _add_image(self, params, screenshot):
        """Composite one rendered tile into the canvas (thread-safe)."""
        with self._add_image_lock:
            tile_image = screenshot.image_pixels
            tile_selector = np.s_[params['y_offset']:params['y_offset'] + params['tile_height'],
                                  params['x_offset']:params['x_offset'] + params['tile_width']]
            if self.include_depth:
                tile_depth = screenshot.depth_array
                depth_array_part = self.depth_array[tile_selector]
                # Overwrite a pixel only where this shard rendered something
                # (depth != 0, or nothing was there before) and it is at
                # least as close as the existing pixel.
                mask = np.logical_and(np.logical_or(tile_depth != 0, depth_array_part == 0),
                                      tile_depth >= depth_array_part)
                depth_array_part[mask] = tile_depth[mask]
            else:
                mask = Ellipsis
            self.image_array[tile_selector][mask] = tile_image[mask]
            self._processed.add(self._get_description(params))
            self._num_states_processed += 1
            elapsed = time.time() - self._start_time
            print('%4d tiles rendered in %5d seconds: %.1f seconds/tile' %
                  (self._num_states_processed, elapsed, elapsed / self._num_states_processed))

    def _maybe_save_state(self):
        """Kick off an async checkpoint if the interval elapsed and no save
        is already running."""
        if not self._save_state_in_progress.is_set(): return
        with self._add_image_lock:
            if self._last_save_time + self.checkpoint_interval < time.time():
                self._last_save_time = time.time()
                self._save_state_in_progress.clear()
                self._save_state_async(save_image=False)

    def _get_description(self, params):
        """Stable identifier for a (shard, tile) pair; keys `_processed`."""
        segment_shard = params.get('segment_shard')
        if segment_shard is not None:
            prefix = 'segment_shard=%d ' % (segment_shard, )
        else:
            prefix = ''
        return '%stile_x=%d tile_y=%d' % (prefix, params['tile_x'], params['tile_y'])

    def _make_capture_request(self, params, state):
        """Build the CaptureScreenshotRequest for one tile, or None if that
        tile was already composited in a previous (resumed) run."""
        description = self._get_description(params)
        if description in self._processed: return None

        def config_callback(s):
            # Render at exactly the tile's pixel size.
            s.viewer_size = (params['tile_width'], params['tile_height'])
            self.config_callback(s)

        def response_callback(screenshot):
            self._add_image(params, screenshot)
            self._maybe_save_state()
        return CaptureScreenshotRequest(state=state,
                                        description=self._get_description(params),
                                        config_callback=config_callback,
                                        response_callback=response_callback,
                                        include_depth=self.include_depth)

    def _get_capture_screenshot_request_iter(self, state_iter):
        """Yield capture requests for all not-yet-processed tiles."""
        for params, state in state_iter:
            request = self._make_capture_request(params, state)
            if request is not None: yield request

    def capture(self, viewers, state_iter, refresh_browser_timeout: int, save_depth: bool, total_requests: int):
        """Render all tiles, then finalize.

        With `save_depth` (partial/sharded output) the .npz checkpoint is
        kept for later shards; otherwise the PNG is written and the
        checkpoint removed.
        """
        capture_screenshots_in_parallel(
            viewers=viewers,
            request_iter=self._get_capture_screenshot_request_iter(state_iter),
            refresh_browser_timeout=refresh_browser_timeout,
            num_to_prefetch=self.num_to_prefetch,
            total_requests=total_requests)
        if not self._save_state_in_progress.is_set():
            print('Waiting for previous save state to complete')
            self._save_state_in_progress.wait()
        if save_depth:
            self._save_state()
        else:
            self._save_image()
            if os.path.exists(self.state_file):
                os.remove(self.state_file)
def capture_image(viewers, args, state):
    """Top-level capture driver: choose tiling/sharding, then render.

    Also handles the optional --output-shard partitioning, where this
    invocation renders only a contiguous slice of the tile states.
    """

    def config_callback(s):
        s.scale_bar_options.scale_factor = args.scale_bar_scale

    segment_shard_size = args.segment_shard_size
    tile_parameters = dict(
        shape=(args.width, args.height),
        tile_shape=(args.tile_width, args.tile_height),
    )
    if segment_shard_size is not None and _should_shard_segments(state, segment_shard_size):
        # Too many segments to render at once: render shard-by-shard with
        # depth buffers so the shards can be composited.
        gen = ShardedTileGenerator(state=state,
                                   segment_shard_size=segment_shard_size,
                                   reverse_bits=args.sort_segments_by_reversed_bits,
                                   **tile_parameters)
        num_states = gen.num_tiles
        state_iter = gen.get_states()
        include_depth = True
    else:
        gen = TileGenerator(**tile_parameters)
        num_states = gen.num_tiles
        state_iter = gen.get_tile_states(state)
        include_depth = False
    capturer = MultiCapturer(
        shape=tile_parameters['shape'],
        include_depth=include_depth,
        output=args.output,
        config_callback=config_callback,
        num_to_prefetch=args.prefetch,
        checkpoint_interval=args.checkpoint_interval,
    )
    num_output_shards = args.num_output_shards
    tiles_per_output_shard = args.tiles_per_output_shard
    output_shard = args.output_shard
    # --output-shard requires exactly one of the two shard-count options
    # (and vice versa).
    if (output_shard is None) != (num_output_shards is None and tiles_per_output_shard is None):
        raise ValueError(
            '--output-shard must be specified in combination with --num-output-shards or --tiles-per-output-shard'
        )
    if output_shard is not None:
        if num_output_shards is not None:
            if num_output_shards < 1:
                raise ValueError('Invalid --num-output-shards: %d' % (num_output_shards, ))
            # Ceiling division: spread the states over the shard count.
            states_per_shard = -(-num_states // num_output_shards)
        else:
            if tiles_per_output_shard < 1:
                raise ValueError('Invalid --tiles-per-output-shard: %d' %
                                 (tiles_per_output_shard, ))
            num_output_shards = -(-num_states // tiles_per_output_shard)
            states_per_shard = tiles_per_output_shard
        if output_shard < 0 or output_shard >= num_output_shards:
            raise ValueError('Invalid --output-shard: %d' % (output_shard, ))
        print('Total states: %d, Number of output shards: %d' % (num_states, num_output_shards))
        # Restrict rendering to this shard's contiguous slice of states.
        state_iter = itertools.islice(state_iter, states_per_shard * output_shard,
                                      states_per_shard * (output_shard + 1))
    else:
        states_per_shard = num_states
    capturer.capture(
        viewers=viewers,
        state_iter=state_iter,
        refresh_browser_timeout=args.refresh_browser_timeout,
        # Keep the depth checkpoint when producing a partial shard so later
        # shards can composite into it.
        save_depth=output_shard is not None,
        total_requests=states_per_shard,
    )
def define_state_modification_args(ap: argparse.ArgumentParser):
    """Register command-line options that override the Neuroglancer state.

    BUG FIX: the two `store_false` flags previously defaulted to True, but
    `apply_state_modifications` only applies an override when the parsed
    value `is not None` — so the state's own showAxisLines /
    showDefaultAnnotations settings were unconditionally forced to True.
    `default=None` restores "leave the state untouched unless the flag is
    given".
    """
    ap.add_argument('--hide-axis-lines',
                    dest='show_axis_lines',
                    action='store_false',
                    default=None,
                    help='Override showAxisLines setting in state.')
    ap.add_argument('--hide-default-annotations',
                    action='store_false',
                    dest='show_default_annotations',
                    default=None,
                    help='Override showDefaultAnnotations setting in state.')
    ap.add_argument('--projection-scale-multiplier',
                    type=float,
                    help='Multiply projection view scale by specified factor.')
    # Memory limits are passed straight into the viewer state (bytes).
    ap.add_argument('--system-memory-limit',
                    type=int,
                    default=3 * 1024 * 1024 * 1024,
                    help='System memory limit')
    ap.add_argument('--gpu-memory-limit',
                    type=int,
                    default=3 * 1024 * 1024 * 1024,
                    help='GPU memory limit')
    ap.add_argument('--concurrent-downloads', type=int, default=32, help='Concurrent downloads')
    ap.add_argument('--layout', type=str, help='Override layout setting in state.')
    ap.add_argument('--cross-section-background-color',
                    type=str,
                    help='Background color for cross sections.')
    ap.add_argument('--scale-bar-scale', type=float, help='Scale factor for scale bar', default=1)
def apply_state_modifications(state: neuroglancer.ViewerState, args: argparse.Namespace):
    """Apply the command-line overrides from `args` to the viewer state."""
    # Side panels are always closed for screenshots.
    state.selected_layer.visible = False
    state.statistics.visible = False
    # Optional overrides: only applied when the flag was actually given.
    optional_overrides = (
        ('layout', args.layout),
        ('show_axis_lines', args.show_axis_lines),
        ('show_default_annotations', args.show_default_annotations),
        ('cross_section_background_color', args.cross_section_background_color),
    )
    for attribute, value in optional_overrides:
        if value is not None:
            setattr(state, attribute, value)
    if args.projection_scale_multiplier is not None:
        state.projection_scale *= args.projection_scale_multiplier
    # Resource limits are always applied.
    state.gpu_memory_limit = args.gpu_memory_limit
    state.system_memory_limit = args.system_memory_limit
    state.concurrent_downloads = args.concurrent_downloads
def define_viewer_args(ap: argparse.ArgumentParser):
    """Register options controlling how the rendering browser is launched."""
    ap.add_argument('--browser', choices=['chrome', 'firefox'], default='chrome')
    # Browser lifecycle flags.
    ap.add_argument('--no-webdriver',
                    action='store_true',
                    help='Do not open browser automatically via webdriver.')
    ap.add_argument('--no-headless',
                    dest='headless',
                    action='store_false',
                    help='Use non-headless webdriver.')
    ap.add_argument('--docker-chromedriver',
                    action='store_true',
                    help='Run Chromedriver with options suitable for running inside docker')
    ap.add_argument('--debug-chromedriver',
                    action='store_true',
                    help='Enable debug logging in Chromedriver')
    # Parallelism: one browser per job.
    ap.add_argument('--jobs',
                    '-j',
                    type=int,
                    default=1,
                    help='Number of browsers to use concurrently. '
                    'This may improve performance at the cost of greater memory usage. '
                    'On a 64GiB 16 hyperthread machine, --jobs=6 works well.')
def define_size_args(ap: argparse.ArgumentParser):
    """Register output image dimension options (defaults: 4K UHD)."""
    for flag, default, axis in (('--width', 3840, 'Width'),
                                ('--height', 2160, 'Height')):
        ap.add_argument(flag, type=int, default=default,
                        help='%s in pixels of image.' % axis)
def define_tile_args(ap: argparse.ArgumentParser):
    """Register options controlling tiled rendering and segment sharding."""
    # Tile dimensions: images larger than one tile are rendered piecewise.
    for flag, cap, low in (('--tile-width', 'Width', 'width'),
                           ('--tile-height', 'Height', 'height')):
        ap.add_argument(
            flag,
            type=int,
            default=4096,
            help='%s in pixels of single tile. If total %s is larger, the screenshot will be captured as multiple tiles.'
            % (cap, low))
    ap.add_argument('--segment-shard-size',
                    type=int,
                    help='Maximum number of segments to render simultaneously. '
                    'If the number of selected segments exceeds this number, '
                    'multiple passes will be used (transparency not supported).')
    ap.add_argument(
        '--sort-segments-by-reversed-bits',
        action='store_true',
        help=
        'When --segment-shard-size is also specified, normally segment ids are ordered numerically before being partitioned into shards. If segment ids are spatially correlated, then this can lead to slower and more memory-intensive rendering. If --sort-segments-by-reversed-bits is specified, segment ids are instead ordered by their bit reversed values, which may avoid the spatial correlation.'
    )
def define_capture_args(ap: argparse.ArgumentParser):
    """Register options tuning the capture loop itself."""
    # How many upcoming states the browser should begin loading early.
    ap.add_argument('--prefetch',
                    type=int,
                    default=1,
                    help='Number of states to prefetch.')
    # Watchdog: reload the browser after this many silent seconds.
    ap.add_argument('--refresh-browser-timeout',
                    type=int,
                    default=60,
                    help='Number of seconds without receiving statistics while capturing a screenshot before browser is considered unresponsive.')
@contextlib.contextmanager
def get_viewers(args: argparse.Namespace):
    """Context manager yielding [(viewer, refresh_browser_callback), ...].

    With --no-webdriver the user must open the printed URLs manually and the
    refresh callback can only print a reminder; otherwise one webdriver-
    controlled browser is launched per --jobs and closed on exit.
    """
    if args.no_webdriver:
        viewers = [neuroglancer.Viewer() for _ in range(args.jobs)]
        print('Open the following URLs to begin rendering')
        for viewer in viewers:
            print(viewer)

        def refresh_browser_callback():
            # No webdriver handle: cannot force a reload.
            print('Browser unresponsive, consider reloading')
        yield [(viewer, refresh_browser_callback) for viewer in viewers]
    else:

        def _make_webdriver():
            webdriver = neuroglancer.webdriver.Webdriver(
                headless=args.headless,
                docker=args.docker_chromedriver,
                debug=args.debug_chromedriver,
                browser=args.browser,
            )

            def refresh_browser_callback():
                print('Browser unresponsive, reloading')
                webdriver.reload_browser()
            return webdriver, refresh_browser_callback
        webdrivers = [_make_webdriver() for _ in range(args.jobs)]
        try:
            yield [(webdriver.viewer, refresh_browser_callback)
                   for webdriver, refresh_browser_callback in webdrivers]
        finally:
            for webdriver, _ in webdrivers:
                try:
                    # NOTE(review): __exit__ is called without the usual
                    # (exc_type, exc_value, tb) arguments — presumably
                    # Webdriver.__exit__ accepts *args; confirm. Teardown
                    # errors are deliberately swallowed.
                    webdriver.__exit__()
                except:
                    pass
def run(args: argparse.Namespace):
    """Render the screenshot described by the parsed command-line arguments."""
    neuroglancer.cli.handle_server_arguments(args)
    # `args.state` was populated by neuroglancer.cli.add_state_arguments.
    state = args.state
    apply_state_modifications(state, args)
    with get_viewers(args) as viewers:
        capture_image(viewers, args, state)
def main(args=None):
    """Command-line entry point: build the argument parser and run."""
    ap = argparse.ArgumentParser()
    neuroglancer.cli.add_server_arguments(ap)
    neuroglancer.cli.add_state_arguments(ap, required=True)
    ap.add_argument('output', help='Output path of screenshot file in PNG format.')
    ap.add_argument('--output-shard', type=int, help='Output shard to write.')
    # The two shard-partitioning options are alternative specifications;
    # at most one may be given.
    output_shard_group = ap.add_mutually_exclusive_group(required=False)
    output_shard_group.add_argument('--num-output-shards',
                                    type=int,
                                    help='Number of output shards.')
    output_shard_group.add_argument('--tiles-per-output-shard',
                                    type=int,
                                    help='Number of tiles per output shard.')
    ap.add_argument('--checkpoint-interval',
                    type=float,
                    default=60,
                    help='Interval in seconds at which to save checkpoints.')
    define_state_modification_args(ap)
    define_viewer_args(ap)
    define_size_args(ap)
    define_tile_args(ap)
    define_capture_args(ap)
    run(ap.parse_args(args))


if __name__ == '__main__':
    main()
|
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import doctest
import json
import os
import re
import unittest
import multiprocessing
import mock
import tempfile
from datetime import datetime, time, timedelta
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import signal
from time import sleep
import warnings
from dateutil.relativedelta import relativedelta
from airflow import configuration
from airflow.executors import SequentialExecutor, LocalExecutor
from airflow.models import Variable
configuration.test_mode()
from airflow import jobs, models, DAG, utils, macros, settings, exceptions
from airflow.models import BaseOperator
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.http_operator import SimpleHttpOperator
from airflow.operators import sensors
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.bin import cli
from airflow.www import app as application
from airflow.settings import Session
from airflow.utils.state import State
from airflow.utils.dates import round_time
from airflow.utils.logging import LoggingMixin
from lxml import html
from airflow.exceptions import AirflowException
from airflow.configuration import AirflowConfigException
import six
NUM_EXAMPLE_DAGS = 16
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
try:
import cPickle as pickle
except ImportError:
# Python 3
import pickle
class FakeDatetime(datetime):
    """A fake replacement for datetime whose classmethods (e.g. `now`) can
    be patched for testing.

    Instances are constructed as plain `datetime` objects so they compare
    and pickle like the real thing.
    """

    def __new__(cls, *args, **kwargs):
        # BUG FIX: the original called `date.__new__(...)`, but `date` is
        # never imported in this module, so instantiation raised NameError.
        return datetime.__new__(datetime, *args, **kwargs)
def reset(dag_id=TEST_DAG_ID):
    """Delete all TaskInstances of `dag_id` so the suite starts clean."""
    session = Session()
    tis = session.query(models.TaskInstance).filter_by(dag_id=dag_id)
    tis.delete()
    session.commit()
    session.close()


# Wipe any leftover task instances for the default test DAG at import time.
reset()
class OperatorSubclass(BaseOperator):
    """
    An operator to test template substitution
    """
    # Airflow renders these attributes through Jinja before execution.
    template_fields = ['some_templated_field']

    def __init__(self, some_templated_field, *args, **kwargs):
        super(OperatorSubclass, self).__init__(*args, **kwargs)
        self.some_templated_field = some_templated_field

    def execute(self, *args, **kwargs):
        # No-op. BUG FIX: the original signature omitted `self`, so the
        # instance was silently consumed as the first positional argument.
        pass
class CoreTest(unittest.TestCase):
# These defaults make the test faster to run
default_scheduler_args = {"file_process_interval": 0,
"processor_poll_interval": 0.5,
"num_runs": 1}
def setUp(self):
    """Build a DagBag with the example DAGs plus a scratch test DAG."""
    configuration.test_mode()
    # dag_folder itself is /dev/null; only the bundled examples load.
    self.dagbag = models.DagBag(
        dag_folder=DEV_NULL, include_examples=True)
    self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
    dag = DAG(TEST_DAG_ID, default_args=self.args)
    self.dag = dag
    # Handles on the example bash DAG and three of its tasks, shared by
    # several tests in this class.
    self.dag_bash = self.dagbag.dags['example_bash_operator']
    self.runme_0 = self.dag_bash.get_task('runme_0')
    self.run_after_loop = self.dag_bash.get_task('run_after_loop')
    self.run_this_last = self.dag_bash.get_task('run_this_last')
def test_schedule_dag_no_previous_runs(self):
    """
    Tests scheduling a dag with no previous runs
    """
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_previous_runs')
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=datetime(2015, 1, 2, 0, 0)))
    dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    assert dag_run is not None
    assert dag_run.dag_id == dag.dag_id
    assert dag_run.run_id is not None
    assert dag_run.run_id != ''
    # With no history, the first run is scheduled at the task start_date.
    assert dag_run.execution_date == datetime(2015, 1, 2, 0, 0), (
        'dag_run.execution_date did not match expectation: {0}'
        .format(dag_run.execution_date))
    assert dag_run.state == State.RUNNING
    assert dag_run.external_trigger == False
    dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
    """
    Test scheduling a dag where there is a prior DagRun
    which has the same run_id as the next run should have
    """
    delta = timedelta(hours=1)
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous',
              schedule_interval=delta,
              start_date=DEFAULT_DATE)
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=DEFAULT_DATE))
    scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
    # Pre-create an externally-triggered run occupying the run_id the
    # scheduler would otherwise use for DEFAULT_DATE.
    dag.create_dagrun(run_id=models.DagRun.id_for_date(DEFAULT_DATE),
                      execution_date=DEFAULT_DATE,
                      state=State.SUCCESS,
                      external_trigger=True)
    dag_run = scheduler.create_dag_run(dag)
    assert dag_run is not None
    assert dag_run.dag_id == dag.dag_id
    assert dag_run.run_id is not None
    assert dag_run.run_id != ''
    # The scheduler must skip past the fake run to the next interval.
    assert dag_run.execution_date == DEFAULT_DATE + delta, (
        'dag_run.execution_date did not match expectation: {0}'
        .format(dag_run.execution_date))
    assert dag_run.state == State.RUNNING
    assert dag_run.external_trigger == False
def test_schedule_dag_once(self):
    """
    Tests scheduling a dag scheduled for @once - should be scheduled the first time
    it is called, and not scheduled the second.
    """
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_once')
    dag.schedule_interval = '@once'
    dag.add_task(models.BaseOperator(
        task_id="faketastic",
        owner='Also fake',
        start_date=datetime(2015, 1, 2, 0, 0)))
    # Two scheduling attempts: only the first may produce a run.
    dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
    assert dag_run is not None
    assert dag_run2 is None
    dag.clear()
def test_schedule_dag_start_end_dates(self):
    """
    Tests that an attempt to schedule a task after the Dag's end_date
    does not succeed.
    """
    delta = timedelta(hours=1)
    runs = 3
    start_date = DEFAULT_DATE
    # end_date is chosen so exactly `runs` intervals fit.
    end_date = start_date + (runs - 1) * delta
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_start_end_dates',
              start_date=start_date,
              end_date=end_date,
              schedule_interval=delta)
    dag.add_task(models.BaseOperator(task_id='faketastic',
                                     owner='Also fake'))
    # Create and schedule the dag runs
    dag_runs = []
    scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
    for i in range(runs):
        dag_runs.append(scheduler.create_dag_run(dag))
    # One attempt beyond end_date must yield nothing.
    additional_dag_run = scheduler.create_dag_run(dag)
    for dag_run in dag_runs:
        assert dag_run is not None
    assert additional_dag_run is None
# Freeze "now" inside airflow.jobs so the scheduling horizon is deterministic.
@mock.patch('airflow.jobs.datetime', FakeDatetime)
def test_schedule_dag_no_end_date_up_to_today_only(self):
    """
    Tests that a Dag created without an end_date can only be scheduled up
    to and including the current datetime.

    For example, if today is 2016-01-01 and we are scheduling from a
    start_date of 2015-01-01, only jobs up to, but not including
    2016-01-01 should be scheduled.
    """
    from datetime import datetime
    FakeDatetime.now = classmethod(lambda cls: datetime(2016, 1, 1))
    session = settings.Session()
    delta = timedelta(days=1)
    start_date = DEFAULT_DATE
    # 365 daily runs fill the whole year up to (not including) the fake "now".
    runs = 365
    dag = DAG(TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only',
              start_date=start_date,
              schedule_interval=delta)
    dag.add_task(models.BaseOperator(task_id='faketastic',
                                     owner='Also fake'))
    dag_runs = []
    scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
    for i in range(runs):
        dag_run = scheduler.create_dag_run(dag)
        dag_runs.append(dag_run)
        # Mark the DagRun as complete
        dag_run.state = State.SUCCESS
        session.merge(dag_run)
        session.commit()
    # Attempt to schedule an additional dag run (for 2016-01-01)
    additional_dag_run = scheduler.create_dag_run(dag)
    for dag_run in dag_runs:
        assert dag_run is not None
    assert additional_dag_run is None
def test_confirm_unittest_mod(self):
    """The test configuration must have unit_test_mode switched on."""
    unit_test_mode = configuration.get('core', 'unit_test_mode')
    assert unit_test_mode
def test_pickling(self):
    """Pickling a DAG should preserve its dag_id in the stored pickle."""
    dp = self.dag.pickle()
    assert self.dag.dag_id == dp.pickle.dag_id
def test_rich_comparison_ops(self):
    """DAG equality, ordering and hashing: driven by the _comps fields,
    sensitive to exact type, with ordering falling back to dag_id."""
    class DAGsubclass(DAG):
        pass

    dag_eq = DAG(TEST_DAG_ID, default_args=self.args)

    dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
    dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)

    dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
    dag_subclass_diff_name = DAGsubclass(
        TEST_DAG_ID + '2', default_args=self.args)

    # Align last_loaded so only dag_diff_load_time differs on that field.
    for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
        d.last_loaded = self.dag.last_loaded

    # test identity equality
    assert self.dag == self.dag

    # test dag (in)equality based on _comps
    assert self.dag == dag_eq
    assert self.dag != dag_diff_name
    assert self.dag != dag_diff_load_time

    # test dag inequality based on type even if _comps happen to match
    assert self.dag != dag_subclass

    # a dag should equal an unpickled version of itself
    assert self.dag == pickle.loads(pickle.dumps(self.dag))

    # dags are ordered based on dag_id no matter what the type is
    assert self.dag < dag_diff_name
    assert not self.dag < dag_diff_load_time
    assert self.dag < dag_subclass_diff_name

    # greater than should have been created automatically by functools
    assert dag_diff_name > self.dag

    # hashes are non-random and match equality
    assert hash(self.dag) == hash(self.dag)
    assert hash(self.dag) == hash(dag_eq)
    assert hash(self.dag) != hash(dag_diff_name)
    assert hash(self.dag) != hash(dag_subclass)
def test_time_sensor(self):
    """TimeSensor with a midnight target should succeed immediately."""
    t = sensors.TimeSensor(
        task_id='time_sensor_check',
        target_time=time(0),
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_check_operators(self):
    """Run CheckOperator and ValueCheckOperator against a throwaway
    table on the default sqlite connection."""
    conn_id = "sqlite_default"
    captainHook = BaseHook.get_hook(conn_id=conn_id)
    captainHook.run("CREATE TABLE operator_test_table (a, b)")
    captainHook.run("insert into operator_test_table values (1,2)")

    # CheckOperator passes when the query returns a truthy first row.
    t = CheckOperator(
        task_id='check',
        sql="select count(*) from operator_test_table",
        conn_id=conn_id,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)

    # 100 is within the 10% tolerance band around pass_value 95.
    t = ValueCheckOperator(
        task_id='value_check',
        pass_value=95,
        tolerance=0.1,
        conn_id=conn_id,
        sql="SELECT 100",
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)

    captainHook.run("drop table operator_test_table")
def test_clear_api(self):
    """Clearing a task and probing its dependents should not raise."""
    task = self.dag_bash.tasks[0]
    task.clear(
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
        upstream=True, downstream=True)
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    ti.are_dependents_done()
def test_illegal_args(self):
    """
    Tests that Operators reject illegal arguments
    """
    # Unknown kwargs currently only emit a PendingDeprecationWarning,
    # so capture warnings rather than expecting an exception.
    with warnings.catch_warnings(record=True) as w:
        t = BashOperator(
            task_id='test_illegal_args',
            bash_command='echo success',
            dag=self.dag,
            illegal_argument_1234='hello?')
        self.assertTrue(
            issubclass(w[0].category, PendingDeprecationWarning))
        self.assertIn(
            'Invalid arguments were passed to BashOperator.',
            w[0].message.args[0])
def test_bash_operator(self):
    """A trivial BashOperator run should complete successfully."""
    operator = BashOperator(
        dag=self.dag,
        task_id='time_sensor_check',
        bash_command="echo success")
    operator.run(
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_bash_operator_multi_byte_output(self):
    """BashOperator must cope with multi-byte (non-ASCII) command output."""
    t = BashOperator(
        task_id='test_multi_byte_bash_operator',
        bash_command=u"echo \u2600",  # prints a multi-byte sun symbol
        dag=self.dag,
        output_encoding='utf-8')
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_trigger_dagrun(self):
    """TriggerDagRunOperator should fire a run of the target DAG.

    The python_callable receives the trigger context and a run object;
    returning the object signals the operator to proceed with the trigger.
    """
    def trigga(context, obj):
        # The original wrapped this in a pointless `if True:`; returning
        # the object unconditionally is the same behavior.
        return obj

    t = TriggerDagRunOperator(
        task_id='test_trigger_dagrun',
        trigger_dag_id='example_bash_operator',
        python_callable=trigga,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_dryrun(self):
    """dry_run() should render the command without executing anything."""
    operator = BashOperator(
        dag=self.dag,
        task_id='time_sensor_check',
        bash_command="echo success")
    operator.dry_run()
def test_sqlite(self):
    """SqliteOperator should execute DDL on the default sqlite connection."""
    import airflow.operators.sqlite_operator
    t = airflow.operators.sqlite_operator.SqliteOperator(
        task_id='time_sqlite',
        sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_timedelta_sensor(self):
    """TimeDeltaSensor should succeed once 2s past the period have elapsed."""
    sensor = sensors.TimeDeltaSensor(
        dag=self.dag,
        task_id='timedelta_sensor_check',
        delta=timedelta(seconds=2))
    sensor.run(
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_external_task_sensor(self):
    """ExternalTaskSensor should see the completed time_sensor_check task."""
    t = sensors.ExternalTaskSensor(
        task_id='test_external_task_sensor_check',
        external_dag_id=TEST_DAG_ID,
        external_task_id='time_sensor_check',
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_external_task_sensor_delta(self):
    """ExternalTaskSensor with a zero execution_delta should succeed."""
    t = sensors.ExternalTaskSensor(
        task_id='test_external_task_sensor_check_delta',
        external_dag_id=TEST_DAG_ID,
        external_task_id='time_sensor_check',
        execution_delta=timedelta(0),
        allowed_states=['success'],
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_external_task_sensor_fn(self):
    """execution_date_fn variant: the identity shift succeeds, a one-day
    shift points at a non-existent run and must time out."""
    # Seed a successful time_sensor_check run to sense against.
    self.test_time_sensor()
    # check that the execution_fn works
    t = sensors.ExternalTaskSensor(
        task_id='test_external_task_sensor_check_delta',
        external_dag_id=TEST_DAG_ID,
        external_task_id='time_sensor_check',
        execution_date_fn=lambda dt: dt + timedelta(0),
        allowed_states=['success'],
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
    # double check that the execution is being called by failing the test
    # NOTE(review): t2 reuses t's task_id in the same DAG — presumably
    # intentional to overwrite the sensor; confirm against DAG.add_task.
    t2 = sensors.ExternalTaskSensor(
        task_id='test_external_task_sensor_check_delta',
        external_dag_id=TEST_DAG_ID,
        external_task_id='time_sensor_check',
        execution_date_fn=lambda dt: dt + timedelta(days=1),
        allowed_states=['success'],
        timeout=1,
        poke_interval=1,
        dag=self.dag)
    with self.assertRaises(exceptions.AirflowSensorTimeout):
        t2.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_external_task_sensor_error_delta_and_fn(self):
    """
    Test that providing execution_delta and a function raises an error
    """
    # The two options are mutually exclusive; construction must fail.
    with self.assertRaises(ValueError):
        t = sensors.ExternalTaskSensor(
            task_id='test_external_task_sensor_check_delta',
            external_dag_id=TEST_DAG_ID,
            external_task_id='time_sensor_check',
            execution_delta=timedelta(0),
            execution_date_fn=lambda dt: dt,
            allowed_states=['success'],
            dag=self.dag)
def test_timeout(self):
    """A 5s sleep under a 1s execution_timeout must raise AirflowTaskTimeout."""
    t = PythonOperator(
        task_id='test_timeout',
        execution_timeout=timedelta(seconds=1),
        python_callable=lambda: sleep(5),
        dag=self.dag)
    self.assertRaises(
        exceptions.AirflowTaskTimeout,
        t.run,
        start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_python_op(self):
    """With provide_context=True, templates_dict values are rendered and
    must match the context's ds."""
    def test_py_op(templates_dict, ds, **kwargs):
        # '{{ ds }}' must have been rendered to the actual ds string.
        if not templates_dict['ds'] == ds:
            raise Exception("failure")

    t = PythonOperator(
        task_id='test_py_op',
        provide_context=True,
        python_callable=test_py_op,
        templates_dict={'ds': "{{ ds }}"},
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_complex_template(self):
    """Template rendering should recurse into nested dict/list fields."""
    def verify_templated_field(context):
        # The '{{ ds }}' buried inside the list must render to context ds.
        self.assertEqual(context['ti'].task.some_templated_field['bar'][1], context['ds'])

    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field={
            'foo': '123',
            'bar': ['baz', '{{ ds }}']
        },
        on_success_callback=verify_templated_field,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
def test_template_with_variable(self):
    """
    Test the availability of variables in templates
    """
    # 'success' is flipped by the callback so we can assert it actually ran.
    val = {
        'success':False,
        'test_value': 'a test value'
    }
    Variable.set("a_variable", val['test_value'])

    def verify_templated_field(context):
        self.assertEqual(context['ti'].task.some_templated_field,
                         val['test_value'])
        val['success'] = True

    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field='{{ var.value.a_variable }}',
        on_success_callback=verify_templated_field,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
    assert val['success']
def test_template_with_json_variable(self):
    """
    Test the availability of variables (serialized as JSON) in templates
    """
    # 'success' is flipped by the callback so we can assert it actually ran.
    val = {
        'success': False,
        'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
    }
    Variable.set("a_variable", val['test_value'], serialize_json=True)

    def verify_templated_field(context):
        # var.json.* drills into the deserialized structure.
        self.assertEqual(context['ti'].task.some_templated_field,
                         val['test_value']['obj']['v2'])
        val['success'] = True

    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field='{{ var.json.a_variable.obj.v2 }}',
        on_success_callback=verify_templated_field,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
    assert val['success']
def test_template_with_json_variable_as_value(self):
    """
    Test the availability of variables (serialized as JSON) in templates, but
    accessed as a value
    """
    # 'success' is flipped by the callback so we can assert it actually ran.
    val = {
        'success': False,
        'test_value': {'foo': 'bar'}
    }
    Variable.set("a_variable", val['test_value'], serialize_json=True)

    def verify_templated_field(context):
        # var.value.* yields the raw JSON string, not the parsed dict.
        self.assertEqual(context['ti'].task.some_templated_field,
                         u'{"foo": "bar"}')
        val['success'] = True

    t = OperatorSubclass(
        task_id='test_complex_template',
        some_templated_field='{{ var.value.a_variable }}',
        on_success_callback=verify_templated_field,
        dag=self.dag)
    t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
    assert val['success']
def test_import_examples(self):
    """The example DAG folder should yield the expected number of DAGs."""
    num_loaded = len(self.dagbag.dags)
    self.assertEqual(num_loaded, NUM_EXAMPLE_DAGS)
def test_local_task_job(self):
    """LocalTaskJob should run a single task instance to completion."""
    TI = models.TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    job = jobs.LocalTaskJob(task_instance=ti, force=True)
    job.run()
def test_scheduler_job(self):
    """SchedulerJob should run to completion for a single example DAG."""
    scheduler = jobs.SchedulerJob(
        dag_id='example_bash_operator',
        **self.default_scheduler_args)
    scheduler.run()
def test_raw_job(self):
    """Running a task instance directly (no job wrapper) should succeed."""
    TI = models.TaskInstance
    ti = TI(
        task=self.runme_0, execution_date=DEFAULT_DATE)
    ti.dag = self.dag_bash
    ti.run(force=True)
def test_doctests(self):
    """Run the doctests embedded in the utils and macros modules."""
    modules = [utils, macros]
    for mod in modules:
        failed, tests = doctest.testmod(mod)
        if failed:
            # Name the offending module and counts so a failure is actionable
            # (the original message was just "Failed a doctest").
            raise Exception(
                "Failed {0} of {1} doctest(s) in module {2!r}".format(
                    failed, tests, mod.__name__))
def test_variable_set_get_round_trip(self):
    """A plain string Variable should come back exactly as stored."""
    stored = "Monday morning breakfast"
    Variable.set("tested_var_set_id", stored)
    assert Variable.get("tested_var_set_id") == stored
def test_variable_set_get_round_trip_json(self):
    """A dict Variable should survive a JSON serialize/deserialize round trip."""
    value = {"a": 17, "b": 47}
    Variable.set("tested_var_set_id", value, serialize_json=True)
    assert value == Variable.get("tested_var_set_id", deserialize_json=True)
def test_get_non_existing_var_should_return_default(self):
    """Variable.get on a missing key should return the provided default."""
    default_value = "some default val"
    assert default_value == Variable.get("thisIdDoesNotExist",
                                         default_var=default_value)
def test_get_non_existing_var_should_not_deserialize_json_default(self):
    """The default for a missing key must be returned verbatim, not run
    through the JSON decoder (the value here is deliberately invalid JSON)."""
    default_value = "}{ this is a non JSON default }{"
    assert default_value == Variable.get("thisIdDoesNotExist",
                                         default_var=default_value,
                                         deserialize_json=True)
def test_parameterized_config_gen(self):
    """parameterized_config should expand template placeholders in the
    default config while keeping the expected sections and keys."""
    cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)

    # making sure some basic building blocks are present:
    assert "[core]" in cfg
    assert "dags_folder" in cfg
    assert "sql_alchemy_conn" in cfg
    assert "fernet_key" in cfg

    # making sure replacement actually happened
    assert "{AIRFLOW_HOME}" not in cfg
    assert "{FERNET_KEY}" not in cfg
def test_config_use_original_when_original_and_fallback_are_present(self):
    """When both FERNET_KEY and FERNET_KEY_CMD exist, the plain value wins."""
    assert configuration.has_option("core", "FERNET_KEY")
    assert not configuration.has_option("core", "FERNET_KEY_CMD")

    FERNET_KEY = configuration.get('core', 'FERNET_KEY')

    configuration.set("core", "FERNET_KEY_CMD", "printf HELLO")

    FALLBACK_FERNET_KEY = configuration.get(
        "core",
        "FERNET_KEY"
    )

    assert FALLBACK_FERNET_KEY == FERNET_KEY

    # restore the conf back to the original state
    configuration.remove_option("core", "FERNET_KEY_CMD")
def test_config_throw_error_when_original_and_fallback_is_absent(self):
    """Removing FERNET_KEY with no _CMD fallback must raise a config error."""
    assert configuration.has_option("core", "FERNET_KEY")
    assert not configuration.has_option("core", "FERNET_KEY_CMD")

    # Stash the value so it can be restored after the negative test.
    FERNET_KEY = configuration.get("core", "FERNET_KEY")
    configuration.remove_option("core", "FERNET_KEY")

    with self.assertRaises(AirflowConfigException) as cm:
        configuration.get("core", "FERNET_KEY")

    exception = str(cm.exception)
    message = "section/key [core/fernet_key] not found in config"
    assert exception == message

    # restore the conf back to the original state
    configuration.set("core", "FERNET_KEY", FERNET_KEY)
    assert configuration.has_option("core", "FERNET_KEY")
def test_class_with_logger_should_have_logger_with_correct_name(self):
    """LoggingMixin subclasses get a logger named after their module + class."""

    # each class should automatically receive a logger with a correct name
    class Blah(LoggingMixin):
        pass

    assert Blah().logger.name == "tests.core.Blah"
    assert SequentialExecutor().logger.name == "airflow.executors.sequential_executor.SequentialExecutor"
    assert LocalExecutor().logger.name == "airflow.executors.local_executor.LocalExecutor"
def test_round_time(self):
    """round_time should snap datetimes onto the schedule grid, both with
    and without an explicit start_date anchor."""
    cases = [
        # (input, interval, start_date or None, expected)
        (datetime(2015, 1, 1, 6), timedelta(days=1), None,
         datetime(2015, 1, 1, 0, 0)),
        (datetime(2015, 1, 2), relativedelta(months=1), None,
         datetime(2015, 1, 1, 0, 0)),
        (datetime(2015, 9, 16, 0, 0), timedelta(1),
         datetime(2015, 9, 14, 0, 0), datetime(2015, 9, 16, 0, 0)),
        (datetime(2015, 9, 15, 0, 0), timedelta(1),
         datetime(2015, 9, 14, 0, 0), datetime(2015, 9, 15, 0, 0)),
        (datetime(2015, 9, 14, 0, 0), timedelta(1),
         datetime(2015, 9, 14, 0, 0), datetime(2015, 9, 14, 0, 0)),
        (datetime(2015, 9, 13, 0, 0), timedelta(1),
         datetime(2015, 9, 14, 0, 0), datetime(2015, 9, 14, 0, 0)),
    ]
    for dt, interval, anchor, expected in cases:
        if anchor is None:
            assert round_time(dt, interval) == expected
        else:
            assert round_time(dt, interval, anchor) == expected
def test_duplicate_dependencies(self):
    """Registering an already-existing dependency (in either direction)
    must raise AirflowException."""
    regexp = "Dependency (.*)runme_0(.*)run_after_loop(.*) " \
             "already registered"

    with self.assertRaisesRegexp(AirflowException, regexp):
        self.runme_0.set_downstream(self.run_after_loop)

    with self.assertRaisesRegexp(AirflowException, regexp):
        self.run_after_loop.set_upstream(self.runme_0)
def test_cyclic_dependencies_1(self):
    """Making runme_0 depend on its own downstream task must be rejected."""
    pattern = "Cycle detected in DAG. (.*)runme_0(.*)"
    with self.assertRaisesRegexp(AirflowException, pattern):
        self.runme_0.set_upstream(self.run_after_loop)
def test_cyclic_dependencies_2(self):
    """Pointing run_after_loop back at its upstream must be rejected."""
    pattern = "Cycle detected in DAG. (.*)run_after_loop(.*)"
    with self.assertRaisesRegexp(AirflowException, pattern):
        self.run_after_loop.set_downstream(self.runme_0)
def test_cyclic_dependencies_3(self):
    """Closing the loop through run_this_last must be rejected."""
    pattern = "Cycle detected in DAG. (.*)run_this_last(.*)"
    with self.assertRaisesRegexp(AirflowException, pattern):
        self.run_this_last.set_downstream(self.runme_0)
def test_bad_trigger_rule(self):
    """An unknown trigger_rule must be rejected at construction time."""
    with self.assertRaises(AirflowException):
        DummyOperator(
            task_id='test_bad_trigger',
            trigger_rule="non_existant",  # [sic] the misspelling is the invalid value
            dag=self.dag)
def test_terminate_task(self):
    """If a task instance's db state get deleted, it should fail"""
    TI = models.TaskInstance
    dag = self.dagbag.dags.get('test_utils')
    task = dag.task_dict.get('sleeps_forever')

    ti = TI(task=task, execution_date=DEFAULT_DATE)
    job = jobs.LocalTaskJob(
        task_instance=ti, force=True, executor=SequentialExecutor())

    # Running task instance asynchronously
    p = multiprocessing.Process(target=job.run)
    p.start()
    # Give the child process time to start the task before inspecting state.
    sleep(5)
    # Dispose the engine so the forked child doesn't share our connections.
    settings.engine.dispose()
    session = settings.Session()
    ti.refresh_from_db(session=session)
    # making sure it's actually running
    assert State.RUNNING == ti.state
    ti = (
        session.query(TI)
        .filter_by(
            dag_id=task.dag_id,
            task_id=task.task_id,
            execution_date=DEFAULT_DATE)
        .one()
    )
    # deleting the instance should result in a failure
    session.delete(ti)
    session.commit()
    # waiting for the async task to finish
    p.join()

    # making sure that the task ended up as failed
    ti.refresh_from_db(session=session)
    assert State.FAILED == ti.state
    session.close()
def test_task_fail_duration(self):
    """If a task fails, the duration should be recorded in TaskFail."""
    p = BashOperator(
        task_id='pass_sleepy',
        bash_command='sleep 3',
        dag=self.dag)
    f = BashOperator(
        task_id='fail_sleepy',
        bash_command='sleep 5',
        execution_timeout=timedelta(seconds=3),
        retry_delay=timedelta(seconds=0),
        dag=self.dag)
    session = settings.Session()
    try:
        # Expected to succeed; swallow failures here because only the
        # TaskFail rows below are under test. (Was a bare `except:`.)
        p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
    except Exception:
        pass
    try:
        # Expected to hit the 3s execution_timeout; the failure is the fixture.
        f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
    except Exception:
        pass
    p_fails = session.query(models.TaskFail).filter_by(
        task_id='pass_sleepy',
        dag_id=self.dag.dag_id,
        execution_date=DEFAULT_DATE).all()
    f_fails = session.query(models.TaskFail).filter_by(
        task_id='fail_sleepy',
        dag_id=self.dag.dag_id,
        execution_date=DEFAULT_DATE).all()
    print(f_fails)
    assert len(p_fails) == 0
    assert len(f_fails) == 1
    # The failed attempt ran for at least the 3 second timeout.
    # (Renamed loop var: the original shadowed the operator `f`.)
    assert sum([fail.duration for fail in f_fails]) >= 3
class CliTests(unittest.TestCase):
    """Exercise the ``airflow`` command line entry points.

    Each test parses real argv through the CLI parser and calls the
    matching ``cli`` function against the bundled example DAGs.
    """

    def setUp(self):
        configuration.test_mode()
        app = application.create_app()
        app.config['TESTING'] = True
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = models.DagBag(
            dag_folder=DEV_NULL, include_examples=True)

    # Persist DAGs
    def test_cli_list_dags(self):
        args = self.parser.parse_args(['list_dags', '--report'])
        cli.list_dags(args)

    def test_cli_list_tasks(self):
        # Flat listing for every example DAG, then the --tree variant once.
        for dag_id in self.dagbag.dags.keys():
            args = self.parser.parse_args(['list_tasks', dag_id])
            cli.list_tasks(args)

        args = self.parser.parse_args([
            'list_tasks', 'example_bash_operator', '--tree'])
        cli.list_tasks(args)

    def test_cli_initdb(self):
        cli.initdb(self.parser.parse_args(['initdb']))

    def test_cli_test(self):
        """'test' should run a single task, with and without --dry_run."""
        cli.test(self.parser.parse_args([
            'test', 'example_bash_operator', 'runme_0',
            DEFAULT_DATE.isoformat()]))
        cli.test(self.parser.parse_args([
            'test', 'example_bash_operator', 'runme_0', '--dry_run',
            DEFAULT_DATE.isoformat()]))

    def test_cli_test_with_params(self):
        """'test' should accept task params as -tp JSON."""
        cli.test(self.parser.parse_args([
            'test', 'example_passing_params_via_test_command', 'run_this',
            '-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
        cli.test(self.parser.parse_args([
            'test', 'example_passing_params_via_test_command', 'also_run_this',
            '-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))

    def test_cli_run(self):
        """'run' with -l should execute a task instance locally."""
        cli.run(self.parser.parse_args([
            'run', 'example_bash_operator', 'runme_0', '-l',
            DEFAULT_DATE.isoformat()]))

    def test_task_state(self):
        cli.task_state(self.parser.parse_args([
            'task_state', 'example_bash_operator', 'runme_0',
            DEFAULT_DATE.isoformat()]))

    def test_dag_state(self):
        self.assertEqual(None, cli.dag_state(self.parser.parse_args([
            'dag_state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))

    def test_pause(self):
        """pause/unpause should toggle the DAG's is_paused flag."""
        args = self.parser.parse_args([
            'pause', 'example_bash_operator'])
        cli.pause(args)
        assert self.dagbag.dags['example_bash_operator'].is_paused in [True, 1]

        args = self.parser.parse_args([
            'unpause', 'example_bash_operator'])
        cli.unpause(args)
        assert self.dagbag.dags['example_bash_operator'].is_paused in [False, 0]

    def test_subdag_clear(self):
        args = self.parser.parse_args([
            'clear', 'example_subdag_operator', '--no_confirm'])
        cli.clear(args)
        args = self.parser.parse_args([
            'clear', 'example_subdag_operator', '--no_confirm', '--exclude_subdags'])
        cli.clear(args)

    def test_backfill(self):
        """Backfill in its plain, -t, --dry_run and -l variants."""
        cli.backfill(self.parser.parse_args([
            'backfill', 'example_bash_operator',
            '-s', DEFAULT_DATE.isoformat()]))
        cli.backfill(self.parser.parse_args([
            'backfill', 'example_bash_operator', '-t', 'runme_0', '--dry_run',
            '-s', DEFAULT_DATE.isoformat()]))
        cli.backfill(self.parser.parse_args([
            'backfill', 'example_bash_operator', '--dry_run',
            '-s', DEFAULT_DATE.isoformat()]))
        cli.backfill(self.parser.parse_args([
            'backfill', 'example_bash_operator', '-l',
            '-s', DEFAULT_DATE.isoformat()]))

    def test_process_subdir_path_with_placeholder(self):
        assert cli.process_subdir('DAGS_FOLDER/abc') == os.path.join(configuration.get_dags_folder(), 'abc')

    def test_trigger_dag(self):
        cli.trigger_dag(self.parser.parse_args([
            'trigger_dag', 'example_bash_operator',
            '-c', '{"foo": "bar"}']))
        # Non-JSON conf must be rejected.
        self.assertRaises(
            ValueError,
            cli.trigger_dag,
            self.parser.parse_args([
                'trigger_dag', 'example_bash_operator',
                '--run_id', 'trigger_dag_xxx',
                '-c', 'NOT JSON'])
        )

    def test_variables(self):
        """Round-trip Variables through every CLI subcommand.

        Covers set/get/delete/list/import/export and verifies that an
        export taken before mutations can restore the original values.
        """
        # Checks if all subcommands are properly received
        cli.variables(self.parser.parse_args([
            'variables', '-s', 'foo', '{"foo":"bar"}']))
        cli.variables(self.parser.parse_args([
            'variables', '-g', 'foo']))
        cli.variables(self.parser.parse_args([
            'variables', '-g', 'baz', '-d', 'bar']))
        cli.variables(self.parser.parse_args([
            'variables']))
        cli.variables(self.parser.parse_args([
            'variables', '-x', 'bar']))
        cli.variables(self.parser.parse_args([
            'variables', '-i', DEV_NULL]))
        cli.variables(self.parser.parse_args([
            'variables', '-e', DEV_NULL]))

        cli.variables(self.parser.parse_args([
            'variables', '-s', 'bar', 'original']))
        # First export
        cli.variables(self.parser.parse_args([
            'variables', '-e', 'variables1.json']))
        # Read the snapshot right away so the handle is closed promptly
        # (the original kept two files open across the whole test).
        with open('variables1.json', 'r') as first_exp:
            first_exp_content = first_exp.read()

        # Mutate state, then restore it from the first export.
        cli.variables(self.parser.parse_args([
            'variables', '-s', 'bar', 'updated']))
        cli.variables(self.parser.parse_args([
            'variables', '-s', 'foo', '{"foo":"oops"}']))
        cli.variables(self.parser.parse_args([
            'variables', '-x', 'foo']))
        # First import
        cli.variables(self.parser.parse_args([
            'variables', '-i', 'variables1.json']))

        assert models.Variable.get('bar') == 'original'
        assert models.Variable.get('foo') == '{"foo": "bar"}'

        # Second export must match the first byte for byte.
        cli.variables(self.parser.parse_args([
            'variables', '-e', 'variables2.json']))
        with open('variables2.json', 'r') as second_exp:
            assert second_exp.read() == first_exp_content

        # Second import
        cli.variables(self.parser.parse_args([
            'variables', '-i', 'variables2.json']))

        assert models.Variable.get('bar') == 'original'
        assert models.Variable.get('foo') == '{"foo": "bar"}'

        # Clean up DB state and the temporary export files.
        session = settings.Session()
        session.query(Variable).delete()
        session.commit()
        session.close()
        os.remove('variables1.json')
        os.remove('variables2.json')
class WebUiTests(unittest.TestCase):
    """Smoke tests for the admin web UI: renders the main views and drives
    the success/clear/run actions with authentication disabled."""

    def setUp(self):
        configuration.test_mode()
        configuration.conf.set("webserver", "authenticate", "False")
        app = application.create_app()
        app.config['TESTING'] = True
        self.app = app.test_client()

        self.dagbag = models.DagBag(
            dag_folder=DEV_NULL, include_examples=True)
        self.dag_bash = self.dagbag.dags['example_bash_operator']
        self.runme_0 = self.dag_bash.get_task('runme_0')

    def test_index(self):
        """The DAGs index page should list the example DAGs."""
        response = self.app.get('/', follow_redirects=True)
        assert "DAGs" in response.data.decode('utf-8')
        assert "example_bash_operator" in response.data.decode('utf-8')

    def test_query(self):
        """The Ad Hoc Query view should render and execute a simple query."""
        response = self.app.get('/admin/queryview/')
        assert "Ad Hoc Query" in response.data.decode('utf-8')
        response = self.app.get(
            "/admin/queryview/?"
            "conn_id=airflow_db&"
            "sql=SELECT+COUNT%281%29+as+TEST+FROM+task_instance")
        assert "TEST" in response.data.decode('utf-8')

    def test_health(self):
        """The health endpoint should report the server as healthy."""
        response = self.app.get('/health')
        assert 'The server is healthy!' in response.data.decode('utf-8')

    def test_dag_views(self):
        """Hit every per-DAG view plus the success/clear/run/refresh/paused
        action endpoints; action URLs first render a confirmation page."""
        response = self.app.get(
            '/admin/airflow/graph?dag_id=example_bash_operator')
        assert "runme_0" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/tree?num_runs=25&dag_id=example_bash_operator')
        assert "runme_0" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/duration?days=30&dag_id=example_bash_operator')
        assert "example_bash_operator" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/tries?days=30&dag_id=example_bash_operator')
        assert "example_bash_operator" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/landing_times?'
            'days=30&dag_id=example_bash_operator')
        assert "example_bash_operator" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/gantt?dag_id=example_bash_operator')
        assert "example_bash_operator" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/code?dag_id=example_bash_operator')
        assert "example_bash_operator" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/blocked')
        response = self.app.get(
            '/admin/configurationview/')
        assert "Airflow Configuration" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/rendered?'
            'task_id=runme_1&dag_id=example_bash_operator&'
            'execution_date={}'.format(DEFAULT_DATE_ISO))
        assert "example_bash_operator" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/log?task_id=run_this_last&'
            'dag_id=example_bash_operator&execution_date={}'
            ''.format(DEFAULT_DATE_ISO))
        assert "run_this_last" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/task?'
            'task_id=runme_0&dag_id=example_bash_operator&'
            'execution_date={}'.format(DEFAULT_DATE_DS))
        assert "Attributes" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/dag_stats')
        assert "example_bash_operator" in response.data.decode('utf-8')
        # Marking success: first request shows the confirmation page,
        # the &confirmed=true request performs the action.
        url = (
            "/admin/airflow/success?task_id=run_this_last&"
            "dag_id=example_bash_operator&upstream=false&downstream=false&"
            "future=false&past=false&execution_date={}&"
            "origin=/admin".format(DEFAULT_DATE_DS))
        response = self.app.get(url)
        assert "Wait a minute" in response.data.decode('utf-8')
        response = self.app.get(url + "&confirmed=true")
        response = self.app.get(
            '/admin/airflow/clear?task_id=run_this_last&'
            'dag_id=example_bash_operator&future=true&past=false&'
            'upstream=true&downstream=false&'
            'execution_date={}&'
            'origin=/admin'.format(DEFAULT_DATE_DS))
        assert "Wait a minute" in response.data.decode('utf-8')
        # Recursive success on a subdag should list all subdag tasks.
        url = (
            "/admin/airflow/success?task_id=section-1&"
            "dag_id=example_subdag_operator&upstream=true&downstream=true&"
            "recursive=true&future=false&past=false&execution_date={}&"
            "origin=/admin".format(DEFAULT_DATE_DS))
        response = self.app.get(url)
        assert "Wait a minute" in response.data.decode('utf-8')
        assert "section-1-task-1" in response.data.decode('utf-8')
        assert "section-1-task-2" in response.data.decode('utf-8')
        assert "section-1-task-3" in response.data.decode('utf-8')
        assert "section-1-task-4" in response.data.decode('utf-8')
        assert "section-1-task-5" in response.data.decode('utf-8')
        response = self.app.get(url + "&confirmed=true")
        url = (
            "/admin/airflow/clear?task_id=runme_1&"
            "dag_id=example_bash_operator&future=false&past=false&"
            "upstream=false&downstream=true&"
            "execution_date={}&"
            "origin=/admin".format(DEFAULT_DATE_DS))
        response = self.app.get(url)
        assert "Wait a minute" in response.data.decode('utf-8')
        response = self.app.get(url + "&confirmed=true")
        url = (
            "/admin/airflow/run?task_id=runme_0&"
            "dag_id=example_bash_operator&force=true&deps=true&"
            "execution_date={}&origin=/admin".format(DEFAULT_DATE_DS))
        response = self.app.get(url)
        response = self.app.get(
            "/admin/airflow/refresh?dag_id=example_bash_operator")
        response = self.app.get("/admin/airflow/refresh_all")
        response = self.app.get(
            "/admin/airflow/paused?"
            "dag_id=example_python_operator&is_paused=false")

    def test_charts(self):
        """Chart views should render for a known built-in chart."""
        session = Session()
        chart_label = "Airflow task instance by type"
        chart = session.query(
            models.Chart).filter(models.Chart.label == chart_label).first()
        chart_id = chart.id
        session.close()
        response = self.app.get(
            '/admin/airflow/chart'
            '?chart_id={}&iteration_no=1'.format(chart_id))
        assert "Airflow task instance by type" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/chart_data'
            '?chart_id={}&iteration_no=1'.format(chart_id))
        assert "example" in response.data.decode('utf-8')
        response = self.app.get(
            '/admin/airflow/dag_details?dag_id=example_branch_operator')
        assert "run_this_first" in response.data.decode('utf-8')

    def test_fetch_task_instance(self):
        """The task_instances JSON endpoint: empty before a run, populated after."""
        url = (
            "/admin/airflow/object/task_instances?"
            "dag_id=example_bash_operator&"
            "execution_date={}".format(DEFAULT_DATE_DS))
        response = self.app.get(url)
        assert "{}" in response.data.decode('utf-8')

        TI = models.TaskInstance
        ti = TI(
            task=self.runme_0, execution_date=DEFAULT_DATE)
        job = jobs.LocalTaskJob(task_instance=ti, force=True)
        job.run()

        response = self.app.get(url)
        assert "runme_0" in response.data.decode('utf-8')

    def tearDown(self):
        # Reset the example DAG's state so other tests start clean.
        self.dag_bash.clear(start_date=DEFAULT_DATE, end_date=datetime.now())
class WebPasswordAuthTest(unittest.TestCase):
    """Login/logout flow for the password auth backend through the web UI."""

    def setUp(self):
        configuration.conf.set("webserver", "authenticate", "True")
        configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.password_auth")

        app = application.create_app()
        app.config['TESTING'] = True
        self.app = app.test_client()
        from airflow.contrib.auth.backends.password_auth import PasswordUser

        # Seed a known user directly into the DB for the login tests.
        session = Session()
        user = models.User()
        password_user = PasswordUser(user)
        password_user.username = 'airflow_passwordauth'
        password_user.password = 'password'
        print(password_user._password)
        session.add(password_user)
        session.commit()
        session.close()

    def get_csrf(self, response):
        """Extract the CSRF token from the login form in *response*."""
        tree = html.fromstring(response.data)
        form = tree.find('.//form')
        return form.find('.//input[@name="_csrf_token"]').value

    def login(self, username, password):
        """POST the login form with a valid CSRF token, following redirects."""
        response = self.app.get('/admin/airflow/login')
        csrf_token = self.get_csrf(response)
        return self.app.post('/admin/airflow/login', data=dict(
            username=username,
            password=password,
            csrf_token=csrf_token
        ), follow_redirects=True)

    def logout(self):
        return self.app.get('/admin/airflow/logout', follow_redirects=True)

    def test_login_logout_password_auth(self):
        """Unknown user, wrong password, then a valid login and logout."""
        assert configuration.getboolean('webserver', 'authenticate') is True

        response = self.login('user1', 'whatever')
        assert 'Incorrect login details' in response.data.decode('utf-8')

        response = self.login('airflow_passwordauth', 'wrongpassword')
        assert 'Incorrect login details' in response.data.decode('utf-8')

        response = self.login('airflow_passwordauth', 'password')
        assert 'Data Profiling' in response.data.decode('utf-8')

        response = self.logout()
        assert 'form-signin' in response.data.decode('utf-8')

    def test_unauthorized_password_auth(self):
        """Anonymous access to a protected view should redirect (302) to login."""
        response = self.app.get("/admin/airflow/landing_times")
        self.assertEqual(response.status_code, 302)

    def tearDown(self):
        # Remove the seeded user and switch authentication back off.
        configuration.test_mode()
        session = Session()
        session.query(models.User).delete()
        session.commit()
        session.close()
        configuration.conf.set("webserver", "authenticate", "False")
class WebLdapAuthTest(unittest.TestCase):
    """Exercise the LDAP auth backend through the web UI login flow.

    NOTE(review): presumably requires a test LDAP server on
    localhost:3890 seeded with the users referenced below — confirm
    against the CI environment.
    """

    def setUp(self):
        configuration.conf.set("webserver", "authenticate", "True")
        configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
        try:
            configuration.conf.add_section("ldap")
        except Exception:
            # Section already exists (e.g. added by an earlier test); that's
            # fine — the bare `except:` this replaces also swallowed
            # KeyboardInterrupt/SystemExit, which it should not.
            pass
        configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
        configuration.conf.set("ldap", "user_filter", "objectClass=*")
        configuration.conf.set("ldap", "user_name_attr", "uid")
        configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
        configuration.conf.set("ldap", "bind_password", "insecure")
        configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
        configuration.conf.set("ldap", "cacert", "")

        app = application.create_app()
        app.config['TESTING'] = True
        self.app = app.test_client()

    def get_csrf(self, response):
        """Extract the CSRF token from the login form in *response*."""
        tree = html.fromstring(response.data)
        form = tree.find('.//form')
        return form.find('.//input[@name="_csrf_token"]').value

    def login(self, username, password):
        """POST the login form with a valid CSRF token, following redirects."""
        response = self.app.get('/admin/airflow/login')
        csrf_token = self.get_csrf(response)
        return self.app.post('/admin/airflow/login', data=dict(
            username=username,
            password=password,
            csrf_token=csrf_token
        ), follow_redirects=True)

    def logout(self):
        return self.app.get('/admin/airflow/logout', follow_redirects=True)

    def test_login_logout_ldap(self):
        """Wrong password, wrong user, then a valid login and logout."""
        assert configuration.getboolean('webserver', 'authenticate') is True

        response = self.login('user1', 'userx')
        assert 'Incorrect login details' in response.data.decode('utf-8')

        response = self.login('userz', 'user1')
        assert 'Incorrect login details' in response.data.decode('utf-8')

        response = self.login('user1', 'user1')
        assert 'Data Profiling' in response.data.decode('utf-8')

        response = self.logout()
        assert 'form-signin' in response.data.decode('utf-8')

    def test_unauthorized(self):
        """Anonymous access to a protected view should redirect (302) to login."""
        response = self.app.get("/admin/airflow/landing_times")
        self.assertEqual(response.status_code, 302)

    def test_no_filter(self):
        """Without LDAP filters every authenticated user sees the full menu."""
        response = self.login('user1', 'user1')
        assert 'Data Profiling' in response.data.decode('utf-8')
        assert 'Connections' in response.data.decode('utf-8')

    def test_with_filters(self):
        """superuser/data-profiler filters should gate the respective menus."""
        configuration.conf.set('ldap', 'superuser_filter',
                               'description=superuser')
        configuration.conf.set('ldap', 'data_profiler_filter',
                               'description=dataprofiler')

        response = self.login('dataprofiler', 'dataprofiler')
        assert 'Data Profiling' in response.data.decode('utf-8')

        response = self.login('superuser', 'superuser')
        assert 'Connections' in response.data.decode('utf-8')

    def tearDown(self):
        # Remove any users created during login and switch auth back off.
        configuration.test_mode()
        session = Session()
        session.query(models.User).delete()
        session.commit()
        session.close()
        configuration.conf.set("webserver", "authenticate", "False")
class LdapGroupTest(unittest.TestCase):
    """Verifies LDAP group resolution for LdapUser outside the web app."""

    def setUp(self):
        configuration.conf.set("webserver", "authenticate", "True")
        configuration.conf.set("webserver", "auth_backend", "airflow.contrib.auth.backends.ldap_auth")
        try:
            configuration.conf.add_section("ldap")
        except:
            # Section may already exist from a previous test run.
            pass
        # Connection details for the test LDAP server fixture.
        configuration.conf.set("ldap", "uri", "ldap://localhost:3890")
        configuration.conf.set("ldap", "user_filter", "objectClass=*")
        configuration.conf.set("ldap", "user_name_attr", "uid")
        configuration.conf.set("ldap", "bind_user", "cn=Manager,dc=example,dc=com")
        configuration.conf.set("ldap", "bind_password", "insecure")
        configuration.conf.set("ldap", "basedn", "dc=example,dc=com")
        configuration.conf.set("ldap", "cacert", "")

    def test_group_belonging(self):
        from airflow.contrib.auth.backends.ldap_auth import LdapUser
        # Expected group memberships served by the test LDAP fixture.
        users = {"user1": ["group1", "group3"],
                 "user2": ["group2"]
                 }
        for user in users:
            mu = models.User(username=user,
                             is_superuser=False)
            auth = LdapUser(mu)
            assert set(auth.ldap_groups) == set(users[user])

    def tearDown(self):
        configuration.test_mode()
        configuration.conf.set("webserver", "authenticate", "False")
class FakeSession(object):
    """Stand-in for requests.Session that always yields a canned 200 reply."""

    def __init__(self):
        from requests import Response

        canned = Response()
        canned.status_code = 200
        canned._content = 'airbnb/airflow'.encode('ascii', 'ignore')
        self.response = canned

    def send(self, request, **kwargs):
        """Ignore the prepared request and hand back the canned response."""
        return self.response

    def prepare_request(self, request):
        """Mirror Session.prepare_request by returning the canned response."""
        return self.response
class HttpOpSensorTest(unittest.TestCase):
    """Tests SimpleHttpOperator and HttpSensor against the FakeSession mock."""

    def setUp(self):
        configuration.test_mode()
        args = {'owner': 'airflow', 'start_date': DEFAULT_DATE_ISO}
        dag = DAG(TEST_DAG_ID, default_args=args)
        self.dag = dag

    @mock.patch('requests.Session', FakeSession)
    def test_get(self):
        # Plain GET; FakeSession always answers 200 / 'airbnb/airflow'.
        t = SimpleHttpOperator(
            task_id='get_op',
            method='GET',
            endpoint='/search',
            data={"client": "ubuntu", "q": "airflow"},
            headers={},
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)

    @mock.patch('requests.Session', FakeSession)
    def test_get_response_check(self):
        # Same GET but with a response_check asserting on the body text.
        t = SimpleHttpOperator(
            task_id='get_op',
            method='GET',
            endpoint='/search',
            data={"client": "ubuntu", "q": "airflow"},
            response_check=lambda response: ("airbnb/airflow" in response.text),
            headers={},
            dag=self.dag)
        t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)

    @mock.patch('requests.Session', FakeSession)
    def test_sensor(self):
        # Sensor should poke until the response_check succeeds (immediately
        # here, thanks to the canned response).
        sensor = sensors.HttpSensor(
            task_id='http_sensor_check',
            conn_id='http_default',
            endpoint='/search',
            params={"client": "ubuntu", "q": "airflow"},
            headers={},
            response_check=lambda response: ("airbnb/airflow" in response.text),
            poke_interval=5,
            timeout=15,
            dag=self.dag)
        sensor.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, force=True)
class FakeWebHDFSHook(object):
    """Minimal WebHDFSHook stand-in that simply echoes back what it is given."""

    def __init__(self, conn_id):
        self.conn_id = conn_id

    def get_conn(self):
        """Return the stored connection id in place of a real connection."""
        return self.conn_id

    def check_for_path(self, hdfs_path):
        """Pretend every path exists by returning the (truthy) path itself."""
        return hdfs_path
class ConnectionTest(unittest.TestCase):
    """Tests connection lookup from AIRFLOW_CONN_* env vars and the DB."""

    def setUp(self):
        configuration.test_mode()
        utils.db.initdb()
        # Connections defined via AIRFLOW_CONN_<CONN_ID> environment variables.
        os.environ['AIRFLOW_CONN_TEST_URI'] = (
            'postgres://username:password@ec2.compute.com:5432/the_database')
        os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
            'postgres://ec2.compute.com/the_database')

    def tearDown(self):
        # BUG FIX: AIRFLOW_CONN_TEST_URI_NO_CREDS was never removed before,
        # leaking the variable into subsequent tests in the same process.
        env_vars = ['AIRFLOW_CONN_TEST_URI',
                    'AIRFLOW_CONN_TEST_URI_NO_CREDS',
                    'AIRFLOW_CONN_AIRFLOW_DB']
        for ev in env_vars:
            if ev in os.environ:
                del os.environ[ev]

    def test_using_env_var(self):
        """A URI-style env var is parsed into its component fields."""
        c = SqliteHook.get_connection(conn_id='test_uri')
        assert c.host == 'ec2.compute.com'
        assert c.schema == 'the_database'
        assert c.login == 'username'
        assert c.password == 'password'
        assert c.port == 5432

    def test_using_unix_socket_env_var(self):
        """A URI without credentials/port yields None for those fields."""
        c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
        assert c.host == 'ec2.compute.com'
        assert c.schema == 'the_database'
        assert c.login is None
        assert c.password is None
        assert c.port is None

    def test_param_setup(self):
        """Connections built directly from kwargs keep those values."""
        c = models.Connection(conn_id='local_mysql', conn_type='mysql',
                              host='localhost', login='airflow',
                              password='airflow', schema='airflow')
        assert c.host == 'localhost'
        assert c.schema == 'airflow'
        assert c.login == 'airflow'
        assert c.password == 'airflow'
        assert c.port is None

    def test_env_var_priority(self):
        """An AIRFLOW_CONN_* env var overrides the DB-defined connection."""
        c = SqliteHook.get_connection(conn_id='airflow_db')
        assert c.host != 'ec2.compute.com'
        os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
            'postgres://username:password@ec2.compute.com:5432/the_database'
        c = SqliteHook.get_connection(conn_id='airflow_db')
        assert c.host == 'ec2.compute.com'
        assert c.schema == 'the_database'
        assert c.login == 'username'
        assert c.password == 'password'
        assert c.port == 5432
        del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
class WebHDFSHookTest(unittest.TestCase):
    """Smoke tests for WebHDFSHook construction."""

    def setUp(self):
        configuration.test_mode()

    def test_simple_init(self):
        """By default no proxy user is configured."""
        from airflow.hooks.webhdfs_hook import WebHDFSHook
        c = WebHDFSHook()
        # FIX: `is None` rather than `== None` -- identity comparison is the
        # correct idiom for None checks (PEP 8).
        assert c.proxy_user is None

    def test_init_proxy_user(self):
        """An explicitly provided proxy user is stored verbatim."""
        from airflow.hooks.webhdfs_hook import WebHDFSHook
        c = WebHDFSHook(proxy_user='someone')
        assert c.proxy_user == 'someone'
# S3Hook depends on optional extras; treat it as absent when the import
# fails so S3HookTest below can be skipped instead of erroring.
try:
    from airflow.hooks.S3_hook import S3Hook
except ImportError:
    S3Hook = None
@unittest.skipIf(S3Hook is None,
                 "Skipping test because S3Hook is not installed")
class S3HookTest(unittest.TestCase):
    """Tests S3 URL parsing in S3Hook (no network access required)."""

    def setUp(self):
        configuration.test_mode()
        # A syntactically valid S3 URL; the key need not actually exist.
        self.s3_test_url = "s3://test/this/is/not/a-real-key.txt"

    def test_parse_s3_url(self):
        # parse_s3_url splits the URL into (bucket, key).
        parsed = S3Hook.parse_s3_url(self.s3_test_url)
        self.assertEqual(parsed,
                         ("test", "this/is/not/a-real-key.txt"),
                         "Incorrect parsing of the s3 url")
# Inline script executed on the "remote" side of the SSH tunnel test:
# listens on localhost:2134, signals readiness by printing 'ready', then
# sends b'hello' to the first client that connects. The string body is
# executable code -- do not reformat it.
HELLO_SERVER_CMD = """
import socket, sys
listener = socket.socket()
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
listener.bind(('localhost', 2134))
listener.listen(1)
sys.stdout.write('ready')
sys.stdout.flush()
conn = listener.accept()[0]
conn.sendall(b'hello')
"""
class SSHHookTest(unittest.TestCase):
    """Tests SSHHook remote command execution and port-forward tunnelling."""

    def setUp(self):
        configuration.test_mode()
        from airflow.contrib.hooks.ssh_hook import SSHHook
        self.hook = SSHHook()
        # Skip host-key verification for the test connection.
        self.hook.no_host_key_check = True

    def test_remote_cmd(self):
        # check_output runs the command remotely and returns its stdout.
        output = self.hook.check_output(["echo", "-n", "airflow"])
        self.assertEqual(output, b"airflow")

    def test_tunnel(self):
        # Start a remote hello server, tunnel local 2135 -> remote 2134,
        # then talk to the server through the tunnel.
        print("Setting up remote listener")
        import subprocess
        import socket
        self.handle = self.hook.Popen([
            "python", "-c", '"{0}"'.format(HELLO_SERVER_CMD)
        ], stdout=subprocess.PIPE)
        print("Setting up tunnel")
        with self.hook.tunnel(2135, 2134):
            print("Tunnel up")
            # The server prints 'ready' once it is listening.
            server_output = self.handle.stdout.read(5)
            self.assertEqual(server_output, b"ready")
            print("Connecting to server via tunnel")
            s = socket.socket()
            s.connect(("localhost", 2135))
            print("Receiving...", )
            response = s.recv(5)
            self.assertEqual(response, b"hello")
            print("Closing connection")
            s.close()
            print("Waiting for listener...")
            output, _ = self.handle.communicate()
            self.assertEqual(self.handle.returncode, 0)
            print("Closing tunnel")
# Module-level mock used as a pluggable email backend target; EmailTest
# references it by dotted path as 'tests.core.send_email_test'.
send_email_test = mock.Mock()
class EmailTest(unittest.TestCase):
    """Tests backend dispatch in utils.email.send_email."""

    def setUp(self):
        # Remove any configured backend so the default path is exercised.
        configuration.remove_option('email', 'EMAIL_BACKEND')

    @mock.patch('airflow.utils.email.send_email')
    def test_default_backend(self, mock_send_email):
        res = utils.email.send_email('to', 'subject', 'content')
        mock_send_email.assert_called_with('to', 'subject', 'content')
        assert res == mock_send_email.return_value

    @mock.patch('airflow.utils.email.send_email_smtp')
    def test_custom_backend(self, mock_send_email):
        # Point EMAIL_BACKEND at the module-level mock defined above.
        configuration.set('email', 'EMAIL_BACKEND', 'tests.core.send_email_test')
        utils.email.send_email('to', 'subject', 'content')
        send_email_test.assert_called_with('to', 'subject', 'content', files=None, dryrun=False, cc=None, bcc=None)
        # The SMTP backend must not have been used.
        assert not mock_send_email.called
class EmailSmtpTest(unittest.TestCase):
    """Tests the SMTP email backend: message assembly and transport choice."""

    def setUp(self):
        # Default to plain SMTP; the SSL test flips this on explicitly.
        configuration.set('smtp', 'SMTP_SSL', 'False')

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_smtp(self, mock_send_mime):
        attachment = tempfile.NamedTemporaryFile()
        attachment.write(b'attachment')
        attachment.seek(0)
        utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
        assert mock_send_mime.called
        call_args = mock_send_mime.call_args[0]
        assert call_args[0] == configuration.get('smtp', 'SMTP_MAIL_FROM')
        assert call_args[1] == ['to']
        msg = call_args[2]
        assert msg['Subject'] == 'subject'
        assert msg['From'] == configuration.get('smtp', 'SMTP_MAIL_FROM')
        # Body plus exactly one attachment.
        assert len(msg.get_payload()) == 2
        mimeapp = MIMEApplication('attachment')
        assert msg.get_payload()[-1].get_payload() == mimeapp.get_payload()

    @mock.patch('airflow.utils.email.send_MIME_email')
    def test_send_bcc_smtp(self, mock_send_mime):
        attachment = tempfile.NamedTemporaryFile()
        attachment.write(b'attachment')
        attachment.seek(0)
        utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
        assert mock_send_mime.called
        call_args = mock_send_mime.call_args[0]
        assert call_args[0] == configuration.get('smtp', 'SMTP_MAIL_FROM')
        # cc and bcc are folded into the recipient list given to the transport.
        assert call_args[1] == ['to', 'cc', 'bcc']
        msg = call_args[2]
        assert msg['Subject'] == 'subject'
        assert msg['From'] == configuration.get('smtp', 'SMTP_MAIL_FROM')
        assert len(msg.get_payload()) == 2
        mimeapp = MIMEApplication('attachment')
        assert msg.get_payload()[-1].get_payload() == mimeapp.get_payload()

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime(self, mock_smtp, mock_smtp_ssl):
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        msg = MIMEMultipart()
        utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
        # Plain SMTP path: connect, STARTTLS, login, send, quit.
        mock_smtp.assert_called_with(
            configuration.get('smtp', 'SMTP_HOST'),
            configuration.getint('smtp', 'SMTP_PORT'),
        )
        assert mock_smtp.return_value.starttls.called
        mock_smtp.return_value.login.assert_called_with(
            configuration.get('smtp', 'SMTP_USER'),
            configuration.get('smtp', 'SMTP_PASSWORD'),
        )
        mock_smtp.return_value.sendmail.assert_called_with('from', 'to', msg.as_string())
        assert mock_smtp.return_value.quit.called

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
        configuration.set('smtp', 'SMTP_SSL', 'True')
        mock_smtp.return_value = mock.Mock()
        mock_smtp_ssl.return_value = mock.Mock()
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
        # With SSL enabled only the SMTP_SSL transport may be used.
        assert not mock_smtp.called
        mock_smtp_ssl.assert_called_with(
            configuration.get('smtp', 'SMTP_HOST'),
            configuration.getint('smtp', 'SMTP_PORT'),
        )

    @mock.patch('smtplib.SMTP_SSL')
    @mock.patch('smtplib.SMTP')
    def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
        # A dryrun must not open any SMTP connection at all.
        utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
        assert not mock_smtp.called
        assert not mock_smtp_ssl.called
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Includes the Schedule class, validation functions, and compilation functions
for compiling a schedule of files to process.
Methods:
compile: shortcut for validating the loaded configuration, then
performing the search, and _resolve functions
load: load the schedule.yaml file into a dictionary
get_file_paths: return a dictionary of files for a given subject, task, and
data source.
search: search the data_path for all files matching the patterns.
validate_schema: validate yaml contents against the schedule configuration
schema.
validate_data_source_names: validates that the data source names contained
in the configuration match a given list of possible data source names
validate_patterns: validates that the regex patterns return named fields
matching a list of required named fields
Configuration schema (YAML):
{task_name (str):
{data_source_name (str):
{filetype (str): pattern (str)}
}
}
"""
from schema import Schema
import os
import re
import pandas as pd
import numpy as np
import functools
def memoize(obj):
    """Decorator that caches calls to *obj*, keyed on the repr of its args.

    The cache dict is also exposed as ``obj.cache`` so callers can inspect
    or clear it.
    """
    store = obj.cache = {}

    @functools.wraps(obj)
    def wrapper(*args, **kwargs):
        lookup = str(args) + str(kwargs)
        if lookup not in store:
            store[lookup] = obj(*args, **kwargs)
        return store[lookup]

    return wrapper
# TODO(janmtl): Schedule should extend pd.DataFrame
class Schedule(object):
    """
    An object for scheduling files to be processed by data sources.

    Args:
        raw (dict): the dictionary resulting from the YAML configuration,
            shaped {task: {data_source: {file: pattern}}}.

    Attributes:
        raw (dict): the validated configuration dictionary.
        sched_df (pandas.DataFrame): a Pandas DataFrame listing all files found
        subjects (list): subject ids present in the compiled schedule.
    """

    def __init__(self, raw):
        # Validate up front so later methods can rely on the schema shape.
        self.raw = self.validate_schema(raw)
        self.sched_df = None
        self.subjects = []
        self.valid_subjects = []
        self.invalid_subjects = []

    @memoize
    def get_subschedule(self, task_name, data_source_name):
        """Fetch the file-pattern mapping for a task / data source pair."""
        return self.raw[task_name][data_source_name]

    def compile(self, data_paths):
        """Search the data path(s) for files and populate ``sched_df``."""
        # TODO(janmtl): this should accept globs
        # TODO(janmtl): should be able to pass a list of excluded subjects
        if not isinstance(data_paths, list):
            # BUG FIX: was list(data_paths), which splits a single path
            # string into individual characters; wrap it in a list instead.
            data_paths = [data_paths]
        files_df = self.search(self.raw, data_paths)
        self.sched_df = self._resolve(files_df)
        self.sched_df[['Subject', 'Task_Order']] = \
            self.sched_df[['Subject', 'Task_Order']].astype(np.int64)
        self.subjects = list(np.unique(self.sched_df['Subject']))

    # TODO(janmtl): The function that checks the integrity of a subject's data
    # should also return which subjects are broken and why
    def validate_files(self):
        """Return a boolean pivot table: True where a subject has exactly one
        file per (data source, task, file) slot."""
        cf = (self.sched_df.pivot_table(index='Subject',
                                        columns=['Data_Source_Name',
                                                 'Task_Name',
                                                 'File'],
                                        values='Path',
                                        aggfunc=lambda x: len(x)) == 1)
        return cf

    def remove_subject(self, subject_id):
        """Drop one subject from the schedule and the subjects list."""
        self.sched_df = self.sched_df[self.sched_df['Subject'] != subject_id]
        if subject_id in self.subjects:
            self.subjects.remove(subject_id)

    def isolate_subjects(self, subject_ids):
        """Restrict the schedule to the given subject ids."""
        self.sched_df = self.sched_df[self.sched_df['Subject']
                                      .isin(subject_ids)]
        self.subjects = subject_ids

    def isolate_tasks(self, task_names):
        """Restrict the schedule to the given task names."""
        self.sched_df = self.sched_df[self.sched_df['Task_Name']
                                      .isin(task_names)]

    def isolate_data_sources(self, data_source_names):
        """Restrict the schedule to the given data source names."""
        self.sched_df = self.sched_df[self.sched_df['Data_Source_Name']
                                      .isin(data_source_names)]

    def get_file_paths(self, subject_id, task_name, data_source_name):
        """Return a {file_name: path} dict for one subject, task, and
        data source.

        Raises:
            Exception: if the schedule is empty or the triple is absent.
        """
        if self.sched_df.empty:
            raise Exception('Schedule is empty, try Schedule.compile(path).')
        sub_df = self.sched_df[
            (self.sched_df['Subject'] == subject_id)
            & (self.sched_df['Task_Name'] == task_name)
            & (self.sched_df['Data_Source_Name'] == data_source_name)
        ]
        if sub_df.empty:
            raise Exception(
                '({}, {}, {}) not found in schedule.'.format(subject_id,
                                                             task_name,
                                                             data_source_name)
            )
        list_of_files = sub_df[['File', 'Path']].to_dict('records')
        files_dict = {ds['File']: ds['Path'] for ds in list_of_files}
        return files_dict

    @staticmethod
    def search(raw, data_paths):
        """Search the data paths for matching file patterns and return a
        pandas DataFrame of the results."""
        files_dict = []
        # .items() instead of the Python-2-only .iteritems() so this also
        # runs under Python 3 (identical iteration behavior on Python 2).
        for task_name, task in raw.items():
            for data_source_name, patterns in task.items():
                for pattern_name, pattern in patterns.items():
                    for data_path in data_paths:
                        for root, _, files in os.walk(data_path):
                            for filepath in files:
                                file_match = re.match(pattern, filepath)
                                if file_match:
                                    fd = file_match.groupdict()
                                    fd['Task_Name'] = task_name
                                    fd['Data_Source_Name'] = data_source_name
                                    fd['File'] = pattern_name
                                    fd['Path'] = os.path.join(root, filepath)
                                    files_dict.append(fd)
        files_df = pd.DataFrame(files_dict)
        # Files whose pattern lacks a Task_Order group default to order 0.
        files_df.fillna({'Task_Order': 0}, inplace=True)
        files_df[['Subject', 'Task_Order']] = \
            files_df[['Subject', 'Task_Order']].astype(np.int64)
        return files_df

    @staticmethod
    def _resolve(files_df):
        """
        Resolve any files that matched multiple Task_Order values and
        return a subset of the Data Frame.

        Args:
            files_df (pandas.DataFrame): a DataFrame resulting from
                Schedule.search().
        """
        # Keep only (subject, source, file, task) slots matched exactly once.
        counter = files_df.groupby(['Subject',
                                    'Data_Source_Name',
                                    'File',
                                    'Task_Name'])['Task_Order'].count()
        maps = counter[counter == 1]
        maps = maps.reset_index()
        maps.drop('Task_Order', axis=1, inplace=True)
        # Recover the unambiguous subject/task -> order assignments ...
        orders = pd.merge(maps, files_df)[['Subject',
                                           'Task_Name',
                                           'Task_Order']]
        orders.drop_duplicates(inplace=True)
        # ... and keep only the files consistent with those assignments.
        sched_df = pd.merge(orders, files_df)[['Subject',
                                               'Task_Name',
                                               'Task_Order',
                                               'File',
                                               'Data_Source_Name',
                                               'Path']]
        return sched_df

    @staticmethod
    def validate_schema(raw):
        """Validate the schedule dictionary against the documented schema."""
        schema = Schema({str: {str: {str: str}}})
        return schema.validate(raw)

    @staticmethod
    def validate_data_source_names(raw, data_source_names):
        """
        Validate that all data source names are contained in the
        data_source_names list.

        Args:
            data_source_names (list(str)): list of valid data source names
                implemented in pypsych.
        """
        for _, task in raw.items():
            for data_source_name in task.keys():
                if data_source_name not in data_source_names:
                    raise Exception(
                        'Schedule could not validate data source ',
                        data_source_name
                    )

    @staticmethod
    def validate_patterns(raw):
        """Validate that file-pattern regexes only use the named groups
        Task_Order and Subject.

        NOTE(review): this rejects extra named groups but does not require
        Subject/Task_Order to be present -- confirm that is intended.
        """
        for _, task in raw.items():
            for _, data_source in task.items():
                for _, pattern in data_source.items():
                    compiled_pattern = re.compile(pattern)
                    for group_name in compiled_pattern.groupindex.keys():
                        if group_name not in ['Task_Order', 'Subject']:
                            raise Exception(
                                'Schedule could not validate pattern ',
                                pattern
                            )
|
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Defines functions for controlling debuggers for micro TVM binaries."""
import atexit
import abc
import errno
import logging
import os
import shlex
import signal
import subprocess
import sys
import termios
import threading
import time
import psutil
from .._ffi import register_func
from . import class_factory
from . import transport
from .transport.file_descriptor import FdTransport
# Module-level logger for debugger lifecycle messages.
_LOG = logging.getLogger(__name__)
class Debugger(metaclass=abc.ABCMeta):
    """An interface for controlling micro TVM debuggers."""

    @abc.abstractmethod
    def start(self):
        """Start the debugger, but do not block on it.

        The runtime will continue to be driven in the background.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def stop(self):
        """Terminate the debugger."""
        raise NotImplementedError()
class GdbDebugger(Debugger):
    """Handles launching, suspending signals, and potentially dealing with terminal issues."""

    # Number of seconds to wait in stop() for a graceful shutdown. After this time has elapsed,
    # the debugger is kill()'d.
    _GRACEFUL_SHUTDOWN_TIMEOUT_SEC = 5.0

    # The instance of GdbDebugger that's currently started.
    _STARTED_INSTANCE = None

    @classmethod
    def _stop_all(cls):
        # atexit hook target: stop whichever instance is still running.
        if cls._STARTED_INSTANCE:
            cls._STARTED_INSTANCE.stop()

    def __init__(self):
        super(GdbDebugger, self).__init__()
        self._is_running = False
        self._is_running_lock = threading.RLock()
        # Set once the child has exited and cleanup has completed.
        self._child_exited_event = threading.Event()
        # Set by _sigusr1_handler once the original signal handlers are back.
        self._signals_reset_event = threading.Event()

    @abc.abstractmethod
    def popen_kwargs(self):
        # Subclasses return the kwargs used to launch the debugger process.
        raise NotImplementedError()

    def _internal_stop(self):
        # Must be called with _is_running_lock held.
        if not self._is_running:
            return
        # Signal ourselves so _sigusr1_handler restores the original SIGINT /
        # SIGUSR1 handlers, then wait for that to happen before proceeding.
        os.kill(os.getpid(), signal.SIGUSR1)
        self._signals_reset_event.wait()
        # Restore the terminal settings saved in start().
        termios.tcsetattr(sys.stdin.fileno(), termios.TCSAFLUSH, self.old_termios)
        try:
            # Terminate the debugger's whole process tree; escalate to kill()
            # for anything still alive after the graceful timeout.
            children = psutil.Process(self.popen.pid).children(recursive=True)
            for c in children:
                c.terminate()
            _, alive = psutil.wait_procs(children, timeout=self._GRACEFUL_SHUTDOWN_TIMEOUT_SEC)
            for a in alive:
                a.kill()
        except psutil.NoSuchProcess:
            pass
        finally:
            self.__class__._STARTED_INSTANCE = None
            self._is_running = False
            self._child_exited_event.set()

    def _wait_for_child(self):
        # Background thread: reap the child and run cleanup when it exits.
        self.popen.wait()
        with self._is_running_lock:
            self._internal_stop()

    @classmethod
    def _sigusr1_handler(cls, signum, stack_frame):  # pylint: disable=unused-argument
        assert (
            cls._STARTED_INSTANCE is not None
        ), "overridden sigusr1 handler should not be invoked when GDB not started"
        # Restore the pre-start() signal handlers and unblock _internal_stop.
        signal.signal(signal.SIGINT, cls._STARTED_INSTANCE.old_sigint_handler)
        signal.signal(signal.SIGUSR1, cls._STARTED_INSTANCE.old_sigusr1_handler)
        cls._STARTED_INSTANCE._signals_reset_event.set()

    @classmethod
    def _sigint_handler(cls, signum, stack_frame):  # pylint: disable=unused-argument
        assert (
            cls._STARTED_INSTANCE is not None
        ), "overridden sigint handler should not be invoked when GDB not started"
        with cls._STARTED_INSTANCE._is_running_lock:
            exists = cls._STARTED_INSTANCE._is_running
        if exists:
            # Forward Ctrl-C to the debugger's process group.
            try:
                os.killpg(cls._STARTED_INSTANCE.child_pgid, signal.SIGINT)
            except ProcessLookupError:
                pass

    def start(self):
        with self._is_running_lock:
            assert not self._is_running
            assert not self._STARTED_INSTANCE
            kwargs = self.popen_kwargs()
            # Run the child in its own session so signals can be targeted at
            # its process group independently of ours.
            self.did_start_new_session = kwargs.setdefault("start_new_session", True)
            # Save terminal state; the debugger may alter it.
            self.old_termios = termios.tcgetattr(sys.stdin.fileno())
            self.popen = subprocess.Popen(**kwargs)
            self._is_running = True
            self.old_sigint_handler = signal.signal(signal.SIGINT, self._sigint_handler)
            self.old_sigusr1_handler = signal.signal(signal.SIGUSR1, self._sigusr1_handler)
            self.__class__._STARTED_INSTANCE = self
        try:
            self.child_pgid = os.getpgid(self.popen.pid)
        except Exception:
            self.stop()
            raise
        with self._is_running_lock:
            self._is_child_alive = True
            t = threading.Thread(target=self._wait_for_child)
            t.daemon = True
            t.start()

    def stop(self):
        # NOTE(review): stop() only waits for the child to exit (cleanup runs
        # in _wait_for_child); it does not itself initiate termination.
        self._child_exited_event.wait()
# Ensure any started GdbDebugger is stopped when the interpreter exits.
atexit.register(GdbDebugger._stop_all)
class GdbTransportDebugger(GdbDebugger):
    """A debugger that uses a single GDB subprocess as both the transport and the debugger.

    Opens pipes for the target's stdin and stdout, launches GDB and configures GDB's target
    arguments to read and write from the pipes using /dev/fd.
    """

    def __init__(self, args, **popen_kw):
        super(GdbTransportDebugger, self).__init__()
        self.args = args
        self.popen_kw = popen_kw

    def popen_kwargs(self):
        """Build Popen kwargs launching gdb (Linux) or lldb (Darwin) wired to fresh pipes."""
        stdin_read, stdin_write = os.pipe()
        stdout_read, stdout_write = os.pipe()
        # The child reads the target's stdin from stdin_read and writes the
        # target's stdout to stdout_write, so those ends must survive exec().
        os.set_inheritable(stdin_read, True)
        os.set_inheritable(stdout_write, True)
        sysname = os.uname()[0]
        if sysname == "Darwin":
            args = [
                "lldb",
                "-O",
                f"target create {self.args[0]}",
                "-O",
                f"settings set target.input-path /dev/fd/{stdin_read}",
                "-O",
                f"settings set target.output-path /dev/fd/{stdout_write}",
            ]
            if len(self.args) > 1:
                args.extend(
                    ["-O", "settings set target.run-args {}".format(" ".join(self.args[1:]))]
                )
        elif sysname == "Linux":
            args = [
                "gdb",
                "-ex",
                f"file {self.args[0]}",
                "-ex",
                (
                    f"set args {' '.join(shlex.quote(a) for a in self.args[1:])} "
                    f"</dev/fd/{stdin_read} >/dev/fd/{stdout_write}"
                ),
            ]
        else:
            raise NotImplementedError(f"System {sysname} is not yet supported")
        # Parent-side transport uses the opposite pipe ends.
        self.fd_transport = FdTransport(
            stdout_read, stdin_write, transport.debug_transport_timeouts()
        )
        self.fd_transport.open()
        return {
            "args": args,
            "pass_fds": [stdin_read, stdout_write],
        }

    def _internal_stop(self):
        # Close the parent's pipe ends before tearing down the child.
        self.fd_transport.close()
        super(GdbTransportDebugger, self)._internal_stop()

    class _Transport(transport.Transport):
        """Transport facade that retries EAGAIN while the child is launching."""

        def __init__(self, gdb_transport_debugger):
            self.gdb_transport_debugger = gdb_transport_debugger

        def timeouts(self):
            return transport.debug_transport_timeouts()

        def open(self):
            pass  # Pipes opened by parent class.

        def write(self, data, timeout_sec):
            end_time = time.monotonic() + timeout_sec if timeout_sec is not None else None
            while True:
                try:
                    return self.gdb_transport_debugger.fd_transport.write(data, timeout_sec)
                except OSError as exc:
                    # NOTE: this error sometimes happens when writes are initiated before the child
                    # process launches.
                    if exc.errno == errno.EAGAIN:
                        if end_time is None or time.monotonic() < end_time:
                            time.sleep(0.1)  # sleep to avoid excessive CPU usage
                            continue
                        # BUG FIX: was an unreachable `raise base.IoTimeoutError()`
                        # after the except block, and `base` is never imported in
                        # this module (NameError). Raise the timeout error here,
                        # on the deadline-expiry path, from the transport module.
                        raise transport.IoTimeoutError()
                    raise exc

        def read(self, n, timeout_sec):
            end_time = time.monotonic() + timeout_sec if timeout_sec is not None else None
            while True:
                try:
                    return self.gdb_transport_debugger.fd_transport.read(n, timeout_sec)
                except OSError as exc:
                    # NOTE: this error sometimes happens when reads are initiated before the child
                    # process launches.
                    if exc.errno == errno.EAGAIN:
                        if end_time is None or time.monotonic() < end_time:
                            time.sleep(0.1)  # sleep to avoid excessive CPU usage
                            continue
                        # BUG FIX: see write() above.
                        raise transport.IoTimeoutError()
                    raise exc

        def close(self):
            pass  # Pipes closed by parent class (DebugWrapperTransport calls stop() next).

    def transport(self):
        """Return a Transport view over this debugger's pipe pair."""
        return self._Transport(self)
class GdbRemoteDebugger(GdbDebugger):
    """A Debugger that invokes GDB and attaches to a remote GDBserver-based target."""

    def __init__(
        self, gdb_binary, remote_hostport, debug_binary, wrapping_context_manager=None, **popen_kw
    ):
        super(GdbRemoteDebugger, self).__init__()
        self.gdb_binary = gdb_binary
        self.remote_hostport = remote_hostport
        self.debug_binary = debug_binary
        self.wrapping_context_manager = wrapping_context_manager
        self.popen_kw = popen_kw

    def popen_kwargs(self):
        """Build Popen kwargs: GDB loads the binary, then attaches to the remote stub."""
        gdb_args = [
            self.gdb_binary,
            "-iex",
            f"file {self.debug_binary}",
            "-iex",
            f"target remote {self.remote_hostport}",
        ]
        kwargs = {"args": gdb_args}
        kwargs.update(self.popen_kw)
        return kwargs

    def start(self):
        """Enter the wrapping context manager (if any), then launch GDB."""
        if self.wrapping_context_manager is not None:
            self.wrapping_context_manager.__enter__()
        super(GdbRemoteDebugger, self).start()

    def stop(self):
        """Stop GDB, always exiting the wrapping context manager afterwards."""
        try:
            super(GdbRemoteDebugger, self).stop()
        finally:
            if self.wrapping_context_manager is not None:
                self.wrapping_context_manager.__exit__(None, None, None)
# Module-level handle to the debugger launched via launch_debugger().
GLOBAL_DEBUGGER = None
class DebuggerFactory(class_factory.ClassFactory):
    # Factories (de)serialized over RPC must instantiate Debugger subclasses.
    SUPERCLASS = Debugger
def launch_debugger(debugger_factory, *args, **kw):
    """Instantiate and start a debugger, stopping any previously running one."""
    global GLOBAL_DEBUGGER
    if GLOBAL_DEBUGGER is not None:
        stop_debugger()
    debugger = debugger_factory.instantiate(*args, **kw)
    GLOBAL_DEBUGGER = debugger
    debugger.start()
@register_func("tvm.micro.debugger.launch_debugger")
def _launch_debugger(debugger_factory_json):
    """RPC endpoint: rebuild the factory from JSON and launch the debugger."""
    launch_debugger(DebuggerFactory.from_json(debugger_factory_json))
@register_func("tvm.micro.debugger.stop_debugger")
def stop_debugger():
    """Stop the module-level debugger, if any, always clearing the global handle."""
    global GLOBAL_DEBUGGER
    if GLOBAL_DEBUGGER is None:
        return
    try:
        GLOBAL_DEBUGGER.stop()
    finally:
        GLOBAL_DEBUGGER = None
class RpcDebugger(Debugger):
    """A Debugger instance that launches the actual debugger on a remote TVM RPC server."""

    def __init__(self, rpc_session, factory, wrapping_context_manager=None):
        super(RpcDebugger, self).__init__()
        self._factory = factory
        # Bind the remote endpoints registered via register_func above.
        self.launch_debugger = rpc_session.get_function("tvm.micro.debugger.launch_debugger")
        self.stop_debugger = rpc_session.get_function("tvm.micro.debugger.stop_debugger")
        self.wrapping_context_manager = wrapping_context_manager

    def start(self):
        if self.wrapping_context_manager is not None:
            self.wrapping_context_manager.__enter__()
        try:
            # NOTE(review): passes `to_json` without calling it -- correct only
            # if ClassFactory.to_json is a property; confirm in class_factory.
            self.launch_debugger(self._factory.to_json)
        except Exception:
            if self.wrapping_context_manager is not None:
                self.wrapping_context_manager.__exit__(None, None, None)
            raise
        try:
            # Block until the user confirms the remote debugger is attached.
            input("Press [Enter] when debugger is set")
        except Exception:
            self.stop()
            raise
        self._is_running = True

    def stop(self):
        # Always exit the wrapping context manager, even if remote stop fails.
        try:
            self.stop_debugger()
        finally:
            if self.wrapping_context_manager is not None:
                self.wrapping_context_manager.__exit__(None, None, None)
|
|
from decimal import Decimal
from django.core.cache import cache
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils import unittest
from rest_framework import status, permissions
from rest_framework.compat import yaml, etree, patterns, url, include
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.renderers import BaseRenderer, JSONRenderer, YAMLRenderer, \
XMLRenderer, JSONPRenderer, BrowsableAPIRenderer
from rest_framework.parsers import YAMLParser, XMLParser
from rest_framework.settings import api_settings
from rest_framework.compat import StringIO
from rest_framework.compat import six
import datetime
import pickle
import re
# Canned status and body returned by the mock views below.
DUMMYSTATUS = status.HTTP_200_OK
DUMMYCONTENT = 'dummycontent'
# def instead of lambda assignment (PEP 8 E731); call behavior is unchanged.
def RENDERER_A_SERIALIZER(data):
    """Serialize *data* as RendererA output: ASCII bytes with an 'A' prefix."""
    return ('Renderer A: %s' % data).encode('ascii')


def RENDERER_B_SERIALIZER(data):
    """Serialize *data* as RendererB output: ASCII bytes with a 'B' prefix."""
    return ('Renderer B: %s' % data).encode('ascii')
# (value, renderer class, expected rendering) triples for BasicRendererTests.
# NOTE(review): the first value is a generator expression, so each entry can
# only be rendered once per process.
expected_results = [
    ((elem for elem in [1, 2, 3]), JSONRenderer, '[1, 2, 3]')  # Generator
]
class BasicRendererTests(TestCase):
    """Check each renderer in expected_results against its expected output."""

    def test_expected_results(self):
        for value, renderer_cls, expected in expected_results:
            output = renderer_cls().render(value)
            self.assertEqual(output, expected)
class RendererA(BaseRenderer):
    """Test renderer advertising the mock/renderera media type."""

    media_type = 'mock/renderera'
    format = "formata"

    def render(self, data, media_type=None, renderer_context=None):
        """Delegate to the module-level serializer for format A."""
        return RENDERER_A_SERIALIZER(data)
class RendererB(BaseRenderer):
    """Test renderer advertising the mock/rendererb media type."""

    media_type = 'mock/rendererb'
    format = "formatb"

    def render(self, data, media_type=None, renderer_context=None):
        """Delegate to the module-level serializer for format B."""
        return RENDERER_B_SERIALIZER(data)
class MockView(APIView):
    """Returns the canned dummy content using the two test renderers."""

    renderer_classes = (RendererA, RendererB)

    def get(self, request, **kwargs):
        return Response(DUMMYCONTENT, status=DUMMYSTATUS)
class MockGETView(APIView):
    """Plain GET view returning a small serializable dict."""

    def get(self, request, **kwargs):
        payload = {'foo': ['bar', 'baz']}
        return Response(payload)
class HTMLView(APIView):
    """Browsable-API-only view returning a plain text payload."""

    renderer_classes = (BrowsableAPIRenderer, )

    def get(self, request, **kwargs):
        body = 'text'
        return Response(body)
class HTMLView1(APIView):
    """Like HTMLView but with a JSON renderer available as a second option."""

    renderer_classes = (BrowsableAPIRenderer, JSONRenderer)

    def get(self, request, **kwargs):
        body = 'text'
        return Response(body)
# URL conf used by the renderer tests (referenced as
# 'rest_framework.tests.renderers'). The format-suffix pattern comes first
# so '.formata' / '.formatb' suffixes match before the bare root.
urlpatterns = patterns('',
    url(r'^.*\.(?P<format>.+)$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
    url(r'^$', MockView.as_view(renderer_classes=[RendererA, RendererB])),
    url(r'^cache$', MockGETView.as_view()),
    url(r'^jsonp/jsonrenderer$', MockGETView.as_view(renderer_classes=[JSONRenderer, JSONPRenderer])),
    url(r'^jsonp/nojsonrenderer$', MockGETView.as_view(renderer_classes=[JSONPRenderer])),
    url(r'^html$', HTMLView.as_view()),
    url(r'^html1$', HTMLView1.as_view()),
    url(r'^api', include('rest_framework.urls', namespace='rest_framework'))
)
class POSTDeniedPermission(permissions.BasePermission):
    """Permission class that denies POST requests and allows everything else."""

    def has_permission(self, request, view):
        if request.method == 'POST':
            return False
        return True
class POSTDeniedView(APIView):
    """View whose POST is blocked by POSTDeniedPermission; other verbs allowed.

    Every handler returns an empty Response -- the tests only care about
    which HTML forms the browsable renderer displays.
    """

    renderer_classes = (BrowsableAPIRenderer,)
    permission_classes = (POSTDeniedPermission,)

    def get(self, request):
        return Response()

    def post(self, request):
        return Response()

    def put(self, request):
        return Response()

    def patch(self, request):
        return Response()
class DocumentingRendererTests(TestCase):
    """Tests the browsable API renderer's permission-aware form display."""

    def test_only_permitted_forms_are_displayed(self):
        # POST is denied by POSTDeniedPermission, so its form must be absent
        # from the rendered page while the PUT/PATCH forms remain.
        view = POSTDeniedView.as_view()
        request = RequestFactory().get('/')
        response = view(request).render()
        self.assertNotContains(response, '>POST<')
        self.assertContains(response, '>PUT<')
        self.assertContains(response, '>PATCH<')
class RendererEndToEndTests(TestCase):
    """
    End-to-end renderer tests run through a generic view, covering content
    negotiation via Accept headers, query-string overrides and format suffixes.
    """
    urls = 'rest_framework.tests.renderers'

    def test_default_renderer_serializes_content(self):
        """If the Accept header is not set the default renderer should serialize the response."""
        response = self.client.get('/')
        self.assertEqual(response['Content-Type'], RendererA.media_type)
        self.assertEqual(response.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)

    def test_head_method_serializes_no_content(self):
        """No response must be included in HEAD requests."""
        response = self.client.head('/')
        self.assertEqual(response.status_code, DUMMYSTATUS)
        self.assertEqual(response['Content-Type'], RendererA.media_type)
        self.assertEqual(response.content, six.b(''))

    def test_default_renderer_serializes_content_on_accept_any(self):
        """If the Accept header is set to */* the default renderer should serialize the response."""
        response = self.client.get('/', HTTP_ACCEPT='*/*')
        self.assertEqual(response['Content-Type'], RendererA.media_type)
        self.assertEqual(response.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)

    def test_specified_renderer_serializes_content_default_case(self):
        """If the Accept header is set the specified renderer should serialize the response.
        (In this case we check that works for the default renderer)"""
        response = self.client.get('/', HTTP_ACCEPT=RendererA.media_type)
        self.assertEqual(response['Content-Type'], RendererA.media_type)
        self.assertEqual(response.content, RENDERER_A_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)

    def test_specified_renderer_serializes_content_non_default_case(self):
        """If the Accept header is set the specified renderer should serialize the response.
        (In this case we check that works for a non-default renderer)"""
        response = self.client.get('/', HTTP_ACCEPT=RendererB.media_type)
        self.assertEqual(response['Content-Type'], RendererB.media_type)
        self.assertEqual(response.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)

    def test_specified_renderer_serializes_content_on_accept_query(self):
        """The '_accept' query string should behave in the same way as the Accept header."""
        query = '?{0}={1}'.format(
            api_settings.URL_ACCEPT_OVERRIDE, RendererB.media_type)
        response = self.client.get('/' + query)
        self.assertEqual(response['Content-Type'], RendererB.media_type)
        self.assertEqual(response.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)

    def test_unsatisfiable_accept_header_on_request_returns_406_status(self):
        """If the Accept header is unsatisfiable we should return a 406 Not Acceptable response."""
        response = self.client.get('/', HTTP_ACCEPT='foo/bar')
        self.assertEqual(response.status_code, status.HTTP_406_NOT_ACCEPTABLE)

    def test_specified_renderer_serializes_content_on_format_query(self):
        """If a 'format' query is specified, the renderer with the matching
        format attribute should serialize the response."""
        query = '?{0}={1}'.format(
            api_settings.URL_FORMAT_OVERRIDE, RendererB.format)
        response = self.client.get('/' + query)
        self.assertEqual(response['Content-Type'], RendererB.media_type)
        self.assertEqual(response.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)

    def test_specified_renderer_serializes_content_on_format_kwargs(self):
        """If a 'format' keyword arg is specified, the renderer with the matching
        format attribute should serialize the response."""
        response = self.client.get('/something.formatb')
        self.assertEqual(response['Content-Type'], RendererB.media_type)
        self.assertEqual(response.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)

    def test_specified_renderer_is_used_on_format_query_with_matching_accept(self):
        """If both a 'format' query and a matching Accept header specified,
        the renderer with the matching format attribute should serialize the response."""
        query = '?{0}={1}'.format(
            api_settings.URL_FORMAT_OVERRIDE, RendererB.format)
        response = self.client.get('/' + query,
                                   HTTP_ACCEPT=RendererB.media_type)
        self.assertEqual(response['Content-Type'], RendererB.media_type)
        self.assertEqual(response.content, RENDERER_B_SERIALIZER(DUMMYCONTENT))
        self.assertEqual(response.status_code, DUMMYSTATUS)
_flat_repr = '{"foo": ["bar", "baz"]}'
_indented_repr = '{\n "foo": [\n "bar",\n "baz"\n ]\n}'
def strip_trailing_whitespace(content):
    """
    Seems to be some inconsistencies re. trailing whitespace with
    different versions of the json lib.
    """
    # Drop spaces that sit immediately before a newline; anything after the
    # final newline is deliberately left untouched.
    trailing_spaces = re.compile(' +\n')
    return trailing_spaces.sub('\n', content)
class JSONRendererTests(TestCase):
    """
    Tests specific to the JSON Renderer
    """

    def test_without_content_type_args(self):
        """
        Test basic JSON rendering.
        """
        data = {'foo': ['bar', 'baz']}
        rendered = JSONRenderer().render(data, 'application/json')
        # Exact expected output; guards against json-lib version drift.
        self.assertEqual(rendered, _flat_repr)

    def test_with_content_type_args(self):
        """
        Test JSON rendering with additional content type arguments supplied.
        """
        data = {'foo': ['bar', 'baz']}
        rendered = JSONRenderer().render(data, 'application/json; indent=2')
        self.assertEqual(strip_trailing_whitespace(rendered), _indented_repr)
class JSONPRendererTests(TestCase):
    """
    Tests specific to the JSONP Renderer
    """
    urls = 'rest_framework.tests.renderers'

    def _request_javascript(self, url):
        # Every JSONP test requests javascript explicitly.
        return self.client.get(url, HTTP_ACCEPT='application/javascript')

    def _assert_jsonp(self, response, callback_name):
        # Shared assertions: 200, javascript content type, wrapped payload.
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response['Content-Type'], 'application/javascript')
        expected = ('%s(%s);' % (callback_name, _flat_repr)).encode('ascii')
        self.assertEqual(response.content, expected)

    def test_without_callback_with_json_renderer(self):
        """
        Test JSONP rendering with View JSON Renderer.
        """
        response = self._request_javascript('/jsonp/jsonrenderer')
        self._assert_jsonp(response, 'callback')

    def test_without_callback_without_json_renderer(self):
        """
        Test JSONP rendering without View JSON Renderer.
        """
        response = self._request_javascript('/jsonp/nojsonrenderer')
        self._assert_jsonp(response, 'callback')

    def test_with_callback(self):
        """
        Test JSONP rendering with callback function name.
        """
        callback_func = 'myjsonpcallback'
        response = self._request_javascript(
            '/jsonp/nojsonrenderer?callback=' + callback_func)
        self._assert_jsonp(response, callback_func)
# YAML support is optional: these tests exist only when the `yaml` package
# was importable at module load time.
if yaml:
    # Expected YAML rendering of {'foo': ['bar', 'baz']}.
    _yaml_repr = 'foo: [bar, baz]\n'
    class YAMLRendererTests(TestCase):
        """
        Tests specific to the YAML Renderer
        """
        def test_render(self):
            """
            Test basic YAML rendering.
            """
            obj = {'foo': ['bar', 'baz']}
            renderer = YAMLRenderer()
            content = renderer.render(obj, 'application/yaml')
            self.assertEqual(content, _yaml_repr)
        def test_render_and_parse(self):
            """
            Test rendering and then parsing returns the original object.
            IE obj -> render -> parse -> obj.
            """
            obj = {'foo': ['bar', 'baz']}
            renderer = YAMLRenderer()
            parser = YAMLParser()
            content = renderer.render(obj, 'application/yaml')
            data = parser.parse(StringIO(content))
            self.assertEqual(obj, data)
class XMLRendererTestCase(TestCase):
    """
    Tests specific to the XML Renderer
    """
    # Nested fixture (datetime + list of dicts) used by the complex-data and
    # round-trip tests below.
    _complex_data = {
        "creation_date": datetime.datetime(2011, 12, 25, 12, 45, 00),
        "name": "name",
        "sub_data_list": [
            {
                "sub_id": 1,
                "sub_name": "first"
            },
            {
                "sub_id": 2,
                "sub_name": "second"
            }
        ]
    }
    def test_render_string(self):
        """
        Test XML rendering of a string value.
        """
        renderer = XMLRenderer()
        content = renderer.render({'field': 'astring'}, 'application/xml')
        self.assertXMLContains(content, '<field>astring</field>')
    def test_render_integer(self):
        """
        Test XML rendering of an integer value.
        """
        renderer = XMLRenderer()
        content = renderer.render({'field': 111}, 'application/xml')
        self.assertXMLContains(content, '<field>111</field>')
    def test_render_datetime(self):
        """
        Test XML rendering of a datetime value.
        """
        renderer = XMLRenderer()
        content = renderer.render({
            'field': datetime.datetime(2011, 12, 25, 12, 45, 00)
        }, 'application/xml')
        self.assertXMLContains(content, '<field>2011-12-25 12:45:00</field>')
    def test_render_float(self):
        """
        Test XML rendering of a float value.
        """
        renderer = XMLRenderer()
        content = renderer.render({'field': 123.4}, 'application/xml')
        self.assertXMLContains(content, '<field>123.4</field>')
    def test_render_decimal(self):
        """
        Test XML rendering of a Decimal value.
        """
        renderer = XMLRenderer()
        content = renderer.render({'field': Decimal('111.2')}, 'application/xml')
        self.assertXMLContains(content, '<field>111.2</field>')
    def test_render_none(self):
        """
        Test XML rendering of None (expected to produce an empty element).
        """
        renderer = XMLRenderer()
        content = renderer.render({'field': None}, 'application/xml')
        self.assertXMLContains(content, '<field></field>')
    def test_render_complex_data(self):
        """
        Test XML rendering of nested lists and dicts.
        """
        renderer = XMLRenderer()
        content = renderer.render(self._complex_data, 'application/xml')
        self.assertXMLContains(content, '<sub_name>first</sub_name>')
        self.assertXMLContains(content, '<sub_name>second</sub_name>')
    @unittest.skipUnless(etree, 'defusedxml not installed')
    def test_render_and_parse_complex_data(self):
        """
        Test that rendering then parsing round-trips the complex fixture.
        """
        renderer = XMLRenderer()
        content = StringIO(renderer.render(self._complex_data, 'application/xml'))
        parser = XMLParser()
        complex_data_out = parser.parse(content)
        error_msg = "complex data differs!IN:\n %s \n\n OUT:\n %s" % (repr(self._complex_data), repr(complex_data_out))
        self.assertEqual(self._complex_data, complex_data_out, error_msg)
    def assertXMLContains(self, xml, string):
        # Every rendering must be a single <root> document with an XML
        # declaration, and `string` must appear somewhere inside it.
        self.assertTrue(xml.startswith('<?xml version="1.0" encoding="utf-8"?>\n<root>'))
        self.assertTrue(xml.endswith('</root>'))
        self.assertTrue(string in xml, '%r not in %r' % (string, xml))
# Tests for caching issue, #346
class CacheRenderTest(TestCase):
    """
    Tests specific to caching responses
    """
    urls = 'rest_framework.tests.renderers'
    cache_key = 'just_a_cache_key'

    @classmethod
    def _get_pickling_errors(cls, obj, seen=None):
        """ Return any errors that would be raised if `obj' is pickled
        Courtesy of koffie @ http://stackoverflow.com/a/7218986/109897
        """
        if seen is None:  # identity check: `== None` needlessly invokes __eq__
            seen = []
        try:
            state = obj.__getstate__()
        except AttributeError:
            # Object has no custom pickling state; nothing to inspect.
            return
        if state is None:
            return
        if isinstance(state, tuple):
            if not isinstance(state[0], dict):
                state = state[1]
            else:
                # BUG FIX: dict.update() returns None, so the original
                # `state = state[0].update(state[1])` clobbered `state` and
                # made the loop below crash. Merge into a fresh dict instead.
                merged = dict(state[0])
                merged.update(state[1])
                state = merged
        result = {}
        for i in state:
            try:
                pickle.dumps(state[i], protocol=2)
            except pickle.PicklingError:
                if state[i] not in seen:
                    seen.append(state[i])
                    result[i] = cls._get_pickling_errors(state[i], seen)
        return result

    def http_resp(self, http_method, url):
        """
        Simple wrapper for Client http requests
        Removes the `client' and `request' attributes from as they are
        added by django.test.client.Client and not part of caching
        responses outside of tests.
        """
        method = getattr(self.client, http_method)
        resp = method(url)
        del resp.client, resp.request
        return resp

    def test_obj_pickling(self):
        """
        Test that responses are properly pickled
        """
        resp = self.http_resp('get', '/cache')
        # Make sure that no pickling errors occurred
        self.assertEqual(self._get_pickling_errors(resp), {})
        # Unfortunately LocMem backend doesn't raise PickleErrors but returns
        # None instead.
        cache.set(self.cache_key, resp)
        self.assertTrue(cache.get(self.cache_key) is not None)

    def test_head_caching(self):
        """
        Test caching of HEAD requests
        """
        resp = self.http_resp('head', '/cache')
        cache.set(self.cache_key, resp)
        cached_resp = cache.get(self.cache_key)
        self.assertIsInstance(cached_resp, Response)

    def test_get_caching(self):
        """
        Test caching of GET requests
        """
        resp = self.http_resp('get', '/cache')
        cache.set(self.cache_key, resp)
        cached_resp = cache.get(self.cache_key)
        self.assertIsInstance(cached_resp, Response)
        self.assertEqual(cached_resp.content, resp.content)
|
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2013 NTT MCL Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.http import HttpResponse # noqa
from django.views.generic import TemplateView # noqa
from django.views.generic import View # noqa
from openstack_horizon import api
from openstack_horizon.usage import quotas
from openstack_horizon.dashboards.project.network_topology.instances \
import tables as instances_tables
from openstack_horizon.dashboards.project.network_topology.ports \
import tables as ports_tables
from openstack_horizon.dashboards.project.network_topology.routers \
import tables as routers_tables
from openstack_horizon.dashboards.project.instances import\
views as i_views
from openstack_horizon.dashboards.project.instances.workflows import\
create_instance as i_workflows
from openstack_horizon.dashboards.project.networks import\
views as n_views
from openstack_horizon.dashboards.project.networks import\
workflows as n_workflows
from openstack_horizon.dashboards.project.routers import\
views as r_views
class NTCreateRouterView(r_views.CreateView):
    # Router creation reused from the routers dashboard, rendered with the
    # topology template and returning to the topology page on success.
    template_name = 'project/network_topology/create_router.html'
    success_url = reverse_lazy("horizon:project:network_topology:index")
class NTCreateNetwork(n_workflows.CreateNetwork):
    # Redirect back to the network topology page whether the workflow
    # succeeds or fails.
    def get_success_url(self):
        return reverse("horizon:project:network_topology:index")
    def get_failure_url(self):
        return reverse("horizon:project:network_topology:index")
class NTCreateNetworkView(n_views.CreateView):
    # Standard network-creation view driving the topology-flavoured workflow.
    workflow_class = NTCreateNetwork
class NTLaunchInstance(i_workflows.LaunchInstance):
    # Launch-instance workflow that lands back on the topology page.
    success_url = "horizon:project:network_topology:index"
class NTLaunchInstanceView(i_views.LaunchInstanceView):
    # Launch-instance view driving the topology-flavoured workflow above.
    workflow_class = NTLaunchInstance
class InstanceView(i_views.IndexView):
    # Instances table rendered inside the topology page's iframe.
    table_class = instances_tables.InstancesTable
    template_name = 'project/network_topology/iframe.html'
class RouterView(r_views.IndexView):
    # Routers table rendered inside the topology page's iframe.
    table_class = routers_tables.RoutersTable
    template_name = 'project/network_topology/iframe.html'
class RouterDetailView(r_views.DetailView):
    # Router detail (ports table only) rendered inside the topology iframe.
    table_classes = (ports_tables.PortsTable, )
    template_name = 'project/network_topology/iframe.html'
    def get_interfaces_data(self):
        # The iframe variant supplies no interfaces table data.
        pass
class NetworkTopologyView(TemplateView):
    """Main network-topology page; exposes permission/quota flags that the
    template uses to enable or disable the action buttons."""
    template_name = 'project/network_topology/index.html'

    def _has_permission(self, policy):
        """Evaluate `policy` via the configured POLICY_CHECK_FUNCTION, if any."""
        policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        if not policy_check:
            # No checker configured: everything is permitted.
            return True
        return policy_check(policy, self.request)

    def _quota_exceeded(self, quota):
        """True when the tenant has no remaining `quota` resources."""
        usages = quotas.tenant_quota_usages(self.request)
        return usages[quota]['available'] <= 0

    def get_context_data(self, **kwargs):
        context = super(NetworkTopologyView, self).get_context_data(**kwargs)
        network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
        context['launch_instance_allowed'] = self._has_permission(
            (("compute", "compute:create"),))
        context['instance_quota_exceeded'] = self._quota_exceeded('instances')
        context['create_network_allowed'] = self._has_permission(
            (("network", "create_network"),))
        context['network_quota_exceeded'] = self._quota_exceeded('networks')
        # Router creation needs both the site-wide switch and the policy.
        router_enabled = network_config.get('enable_router', True)
        context['create_router_allowed'] = (
            router_enabled and
            self._has_permission((("network", "create_router"),)))
        context['router_quota_exceeded'] = self._quota_exceeded('routers')
        context['console_type'] = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
        return context
class JSONView(View):
    """Assemble the tenant's network topology (servers, networks, ports and
    routers) and return it as a single JSON document for the topology JS."""
    @property
    def is_router_enabled(self):
        # Router support can be switched off site-wide via the
        # OPENSTACK_NEUTRON_NETWORK setting; default is enabled.
        network_config = getattr(settings, 'OPENSTACK_NEUTRON_NETWORK', {})
        return network_config.get('enable_router', True)
    def add_resource_url(self, view, resources):
        """Attach a detail-page 'url' to each resource dict owned by the
        current tenant; resources of other tenants are left without one."""
        tenant_id = self.request.user.tenant_id
        for resource in resources:
            if (resource.get('tenant_id')
                and tenant_id != resource.get('tenant_id')):
                continue
            resource['url'] = reverse(view, None, [str(resource['id'])])
    def _check_router_external_port(self, ports, router_id, network_id):
        """Return True if `ports` already contains the given router's port
        on the given external network."""
        for port in ports:
            if (port['network_id'] == network_id
                and port['device_id'] == router_id):
                return True
        return False
    def _get_servers(self, request):
        # Get nova data
        try:
            servers, more = api.nova.server_list(request)
        except Exception:
            # Best effort: the topology still renders without instances.
            servers = []
        # Console flavour the client JS should open for an instance.
        console_type = getattr(settings, 'CONSOLE_TYPE', 'AUTO')
        if console_type == 'SPICE':
            console = 'spice'
        else:
            console = 'vnc'
        data = [{'name': server.name,
                 'status': server.status,
                 'console': console,
                 'task': getattr(server, 'OS-EXT-STS:task_state'),
                 'id': server.id} for server in servers]
        self.add_resource_url('horizon:project:instances:detail', data)
        return data
    def _get_networks(self, request):
        # Get neutron data
        # if we didn't specify tenant_id, all networks shown as admin user.
        # so it is need to specify the networks. However there is no need to
        # specify tenant_id for subnet. The subnet which belongs to the public
        # network is needed to draw subnet information on public network.
        try:
            neutron_networks = api.neutron.network_list_for_tenant(
                request,
                request.user.tenant_id)
        except Exception:
            # Best effort: render whatever could be fetched.
            neutron_networks = []
        networks = [{'name': network.name,
                     'id': network.id,
                     'subnets': [{'cidr': subnet.cidr}
                                 for subnet in network.subnets],
                     'router:external': network['router:external']}
                    for network in neutron_networks]
        self.add_resource_url('horizon:project:networks:detail',
                              networks)
        # Add public networks to the networks list
        if self.is_router_enabled:
            try:
                neutron_public_networks = api.neutron.network_list(
                    request,
                    **{'router:external': True})
            except Exception:
                neutron_public_networks = []
            my_network_ids = [net['id'] for net in networks]
            for publicnet in neutron_public_networks:
                # Skip externals the tenant already sees in its own list.
                if publicnet.id in my_network_ids:
                    continue
                try:
                    subnets = [{'cidr': subnet.cidr}
                               for subnet in publicnet.subnets]
                except Exception:
                    subnets = []
                networks.append({
                    'name': publicnet.name,
                    'id': publicnet.id,
                    'subnets': subnets,
                    'router:external': publicnet['router:external']})
        # External networks first (True sorts before False when reversed).
        return sorted(networks,
                      key=lambda x: x.get('router:external'),
                      reverse=True)
    def _get_routers(self, request):
        if not self.is_router_enabled:
            return []
        try:
            neutron_routers = api.neutron.router_list(
                request,
                tenant_id=request.user.tenant_id)
        except Exception:
            neutron_routers = []
        routers = [{'id': router.id,
                    'name': router.name,
                    'status': router.status,
                    'external_gateway_info': router.external_gateway_info}
                   for router in neutron_routers]
        self.add_resource_url('horizon:project:routers:detail', routers)
        return routers
    def _get_ports(self, request):
        try:
            neutron_ports = api.neutron.port_list(request)
        except Exception:
            neutron_ports = []
        ports = [{'id': port.id,
                  'network_id': port.network_id,
                  'device_id': port.device_id,
                  'fixed_ips': port.fixed_ips,
                  'device_owner': port.device_owner,
                  'status': port.status}
                 for port in neutron_ports]
        self.add_resource_url('horizon:project:networks:ports:detail',
                              ports)
        return ports
    def _prepare_gateway_ports(self, routers, ports):
        # user can't see port on external network. so we are
        # adding fake port based on router information
        for router in routers:
            external_gateway_info = router.get('external_gateway_info')
            if not external_gateway_info:
                continue
            external_network = external_gateway_info.get(
                'network_id')
            if not external_network:
                continue
            # Real port already visible: no synthetic one needed.
            if self._check_router_external_port(ports,
                                                router['id'],
                                                external_network):
                continue
            fake_port = {'id': 'gateway%s' % external_network,
                         'network_id': external_network,
                         'device_id': router['id'],
                         'fixed_ips': []}
            ports.append(fake_port)
    def get(self, request, *args, **kwargs):
        """Collect all topology pieces and serialize them as JSON."""
        data = {'servers': self._get_servers(request),
                'networks': self._get_networks(request),
                'ports': self._get_ports(request),
                'routers': self._get_routers(request)}
        self._prepare_gateway_ports(data['routers'], data['ports'])
        json_string = json.dumps(data, ensure_ascii=False)
        # NOTE(review): 'text/json' is nonstandard ('application/json' is the
        # registered type); changing it is client-visible, so left as-is.
        return HttpResponse(json_string, content_type='text/json')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.