| text (string, 12 to 1.05M chars) | repo_name (string, 5 to 86 chars) | path (string, 4 to 191 chars) | language (1 class) | license (15 classes) | size (int32, 12 to 1.05M) | keyword (list, 1 to 23 items) | text_hash (string, 64 chars) |
|---|---|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
"""
Output Plugin for Helix MP3 encoder
Copyright (c) 2006-2008 by Nyaochi
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
http://www.gnu.org/copyleft/gpl.html .
"""
from celib import *
class HelixMP3Output(OutputModule):
def __init__(self):
self.name = 'hmp3'
self.is_utf8 = False
self.ext = '.mp3'
self.cmd = 'hmp3'
self.cmdtag = 'tag'
self.doc = OutputModuleDocument()
self.doc.tools = (
'Helix MPEG Layer III audio encoder (hmp3)',
'Tag - Automatic Tag from filename',
)
self.doc.commands = (self.cmd, self.cmdtag)
self.doc.limitations = (
'Writes APEv2 tags',
)
self.doc.tags = ('TITLE','ARTIST','ALBUM','TRACKNUMBER','GENRE','DATE')
def handle_track(self, track, options):
# Add the command line to read the source audio.
args = []
args.append(track['input_cmdline'])
args.append('|')
        # Add arguments for the mp3 encoder.
args.append(qstr(self.cmd))
args.append(qstr('-'))
args.append(qstr(track['output']))
args.append(track.get('output_option'))
cmdline = args_to_string(args)
self.console.execute(cmdline)
# Tag the output file.
args = []
args.append(qstr(self.cmdtag))
args.append(optstr('--title', track.get('TITLE')))
args.append(optstr('--artist', track.get('ARTIST')))
args.append(optstr('--album', track.get('ALBUM')))
args.append(optstr('--track', track.get('TRACKNUMBER')))
args.append(optstr('--genre', track.get('GENRE')))
args.append(optstr('--year', track.get('DATE')))
args.append('--ape2')
args.append(track.get('output_option_tag'))
args.append(qstr(track['output']))
# if track.get('COMPILATION'):
# args.append(optstr('--comment', 'COMPILATION=1'))
# Execute the command.
cmdline = args_to_string(args)
return self.console.execute(cmdline)
| rinrinne/cueproc-alternative | src/ce_hmp3.py | Python | gpl-2.0 | 2,706 | ["VisIt"] | a22c2156979b0bedaa8e4ad1e94ebd6a8b0cd2a4d83ed20be9bbd138569d66f1 |
# -*- coding: utf-8 -*-
"""
Authors: Gonzalo E. Espinoza-Dávalos
Contact: g.espinoza@un-ihe.org, gespinoza@utexas.edu
Repository: https://github.com/gespinoza/davgis
Module: davgis
Description:
This module is a python wrapper to simplify scripting and automation of common
GIS workflows used in water resources.
"""
from __future__ import division
import os
import math
import tempfile
import warnings
import ogr
import osr
import gdal
import pandas as pd
import netCDF4
from scipy.interpolate import griddata
np = pd.np
def Buffer(input_shp, output_shp, distance):
"""
Creates a buffer of the input shapefile by a given distance
"""
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_lyr_defn = inp_lyr.GetLayerDefn()
inp_srs = inp_lyr.GetSpatialRef()
# Output
out_name = os.path.splitext(os.path.basename(output_shp))[0]
out_driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(output_shp):
out_driver.DeleteDataSource(output_shp)
out_source = out_driver.CreateDataSource(output_shp)
out_lyr = out_source.CreateLayer(out_name, inp_srs, ogr.wkbPolygon)
out_lyr_defn = out_lyr.GetLayerDefn()
# Add fields
for i in range(inp_lyr_defn.GetFieldCount()):
field_defn = inp_lyr_defn.GetFieldDefn(i)
out_lyr.CreateField(field_defn)
# Add features
for i in range(inp_lyr.GetFeatureCount()):
feature_inp = inp_lyr.GetNextFeature()
geometry = feature_inp.geometry()
feature_out = ogr.Feature(out_lyr_defn)
for j in range(0, out_lyr_defn.GetFieldCount()):
feature_out.SetField(out_lyr_defn.GetFieldDefn(j).GetNameRef(),
feature_inp.GetField(j))
feature_out.SetGeometry(geometry.Buffer(distance))
out_lyr.CreateFeature(feature_out)
feature_out = None
# Save and/or close the data sources
inp_source = None
out_source = None
# Return
return output_shp
def Feature_to_Raster(input_shp, output_tiff,
cellsize, field_name=False, NoData_value=-9999):
"""
Converts a shapefile into a raster
"""
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
# Extent
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
x_ncells = int((x_max - x_min) / cellsize)
y_ncells = int((y_max - y_min) / cellsize)
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal.GDT_Int16)
out_source.SetGeoTransform((x_min, cellsize, 0, y_max, 0, -cellsize))
out_source.SetProjection(inp_srs.ExportToWkt())
out_lyr = out_source.GetRasterBand(1)
out_lyr.SetNoDataValue(NoData_value)
# Rasterize
if field_name:
gdal.RasterizeLayer(out_source, [1], inp_lyr,
options=["ATTRIBUTE={0}".format(field_name)])
else:
gdal.RasterizeLayer(out_source, [1], inp_lyr, burn_values=[1])
# Save and/or close the data sources
inp_source = None
out_source = None
# Return
return output_tiff
def List_Fields(input_lyr):
"""
Lists the field names of input layer
"""
# Input
if isinstance(input_lyr, str):
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_lyr, 0)
inp_lyr = inp_source.GetLayer()
inp_lyr_defn = inp_lyr.GetLayerDefn()
elif isinstance(input_lyr, ogr.Layer):
inp_lyr_defn = input_lyr.GetLayerDefn()
# List
names_ls = []
# Loop
for j in range(0, inp_lyr_defn.GetFieldCount()):
field_defn = inp_lyr_defn.GetFieldDefn(j)
names_ls.append(field_defn.GetName())
# Save and/or close the data sources
inp_source = None
# Return
return names_ls
def Raster_to_Array(input_tiff, ll_corner, x_ncells, y_ncells,
values_type='float32'):
"""
Loads a raster into a numpy array
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_data_type = inp_band.DataType
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
ll_x = ll_corner[0]
ll_y = ll_corner[1]
top_left_x = ll_x
top_left_y = ll_y - cellsize_y*y_ncells
# Change start point
temp_path = tempfile.mkdtemp()
temp_driver = gdal.GetDriverByName('GTiff')
temp_tiff = os.path.join(temp_path, os.path.basename(input_tiff))
temp_source = temp_driver.Create(temp_tiff, x_ncells, y_ncells,
1, inp_data_type)
temp_source.GetRasterBand(1).SetNoDataValue(NoData_value)
temp_source.SetGeoTransform((top_left_x, cellsize_x, rot_1,
top_left_y, rot_2, cellsize_y))
temp_source.SetProjection(inp_srs)
# Snap
gdal.ReprojectImage(inp_lyr, temp_source, inp_srs, inp_srs,
gdal.GRA_Bilinear)
temp_source = None
# Read array
d_type = pd.np.dtype(values_type)
out_lyr = gdal.Open(temp_tiff)
array = out_lyr.ReadAsArray(0, 0, out_lyr.RasterXSize,
out_lyr.RasterYSize).astype(d_type)
array[pd.np.isclose(array, NoData_value)] = pd.np.nan
out_lyr = None
return array
def Resample(input_tiff, output_tiff, cellsize, method=None,
NoData_value=-9999):
"""
Resamples a raster to a different spatial resolution
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
# NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
x_ncells = int(math.floor(x_tot_n * (cellsize_x/cellsize)))
y_ncells = int(math.floor(y_tot_n * (-cellsize_y/cellsize)))
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_source.GetRasterBand(1).SetNoDataValue(NoData_value)
out_source.SetGeoTransform((top_left_x, cellsize, rot_1,
top_left_y, rot_2, -cellsize))
out_source.SetProjection(inp_srs)
# Resampling
method_dict = {'NearestNeighbour': gdal.GRA_NearestNeighbour,
'Bilinear': gdal.GRA_Bilinear,
'Cubic': gdal.GRA_Cubic,
'CubicSpline': gdal.GRA_CubicSpline,
'Lanczos': gdal.GRA_Lanczos,
'Average': gdal.GRA_Average,
'Mode': gdal.GRA_Mode}
if method in range(6):
method_sel = method
elif method in method_dict.keys():
method_sel = method_dict[method]
else:
warnings.warn('Using default interpolation method: Nearest Neighbour')
method_sel = 0
gdal.ReprojectImage(inp_lyr, out_source, inp_srs, inp_srs, method_sel)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
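# A minimal usage sketch (the file names are hypothetical; only the signature and the
# method names in method_dict above come from Resample itself):
#   Resample('dem.tif', 'dem_500m.tif', 500.0, method='Average')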
def Array_to_Raster(input_array, output_tiff, ll_corner, cellsize,
srs_wkt):
"""
Saves an array into a raster file
"""
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
y_ncells, x_ncells = input_array.shape
gdal_datatype = gdaltype_from_dtype(input_array.dtype)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal_datatype)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(-9999)
out_top_left_x = ll_corner[0]
out_top_left_y = ll_corner[1] + cellsize*y_ncells
out_source.SetGeoTransform((out_top_left_x, cellsize, 0,
out_top_left_y, 0, -cellsize))
out_source.SetProjection(str(srs_wkt))
out_band.WriteArray(input_array)
# Save and/or close the data sources
out_source = None
# Return
return output_tiff
def Clip(input_tiff, output_tiff, bbox):
"""
Clips a raster given a bounding box
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
# Bounding box
xmin, ymin, xmax, ymax = bbox
# Get indices, number of cells, and top left corner
x1 = max([0, int(math.floor((xmin - top_left_x)/cellsize_x))])
x2 = min([x_tot_n, int(math.ceil((xmax - top_left_x)/cellsize_x))])
y1 = max([0, int(math.floor((ymax - top_left_y)/cellsize_y))])
y2 = min([y_tot_n, int(math.ceil((ymin - top_left_y)/cellsize_y))])
x_ncells = x2 - x1
y_ncells = y2 - y1
out_top_left_x = top_left_x + x1*cellsize_x
out_top_left_y = top_left_y + y1*cellsize_y
# Output
out_array = inp_array[y1:y2, x1:x2]
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform((out_top_left_x, cellsize_x, rot_1,
out_top_left_y, rot_2, cellsize_y))
out_source.SetProjection(inp_srs)
out_band.WriteArray(out_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Raster_to_Points(input_tiff, output_shp):
"""
Converts a raster to a point shapefile
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
top_left_x = transform[0]
cellsize_x = transform[1]
top_left_y = transform[3]
cellsize_y = transform[5]
NoData_value = inp_band.GetNoDataValue()
x_tot_n = inp_lyr.RasterXSize
y_tot_n = inp_lyr.RasterYSize
top_left_x_center = top_left_x + cellsize_x/2.0
top_left_y_center = top_left_y + cellsize_y/2.0
# Read array
array = inp_lyr.ReadAsArray(0, 0, x_tot_n, y_tot_n) # .astype(pd.np.float)
array[pd.np.isclose(array, NoData_value)] = pd.np.nan
# Output
out_srs = osr.SpatialReference()
out_srs.ImportFromWkt(inp_srs)
out_name = os.path.splitext(os.path.basename(output_shp))[0]
out_driver = ogr.GetDriverByName('ESRI Shapefile')
if os.path.exists(output_shp):
out_driver.DeleteDataSource(output_shp)
out_source = out_driver.CreateDataSource(output_shp)
out_lyr = out_source.CreateLayer(out_name, out_srs, ogr.wkbPoint)
ogr_field_type = ogrtype_from_dtype(array.dtype)
Add_Field(out_lyr, "RASTERVALU", ogr_field_type)
out_lyr_defn = out_lyr.GetLayerDefn()
# Add features
for xi in range(x_tot_n):
for yi in range(y_tot_n):
value = array[yi, xi]
if ~pd.np.isnan(value):
feature_out = ogr.Feature(out_lyr_defn)
feature_out.SetField2(0, value)
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(top_left_x_center + xi*cellsize_x,
top_left_y_center + yi*cellsize_y)
feature_out.SetGeometry(point)
out_lyr.CreateFeature(feature_out)
feature_out = None
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_shp
def Add_Field(input_lyr, field_name, ogr_field_type):
"""
Add a field to a layer using the following ogr field types:
0 = ogr.OFTInteger
1 = ogr.OFTIntegerList
2 = ogr.OFTReal
3 = ogr.OFTRealList
4 = ogr.OFTString
5 = ogr.OFTStringList
6 = ogr.OFTWideString
7 = ogr.OFTWideStringList
8 = ogr.OFTBinary
9 = ogr.OFTDate
10 = ogr.OFTTime
11 = ogr.OFTDateTime
"""
# List fields
fields_ls = List_Fields(input_lyr)
# Check if field exist
if field_name in fields_ls:
raise Exception('Field: "{0}" already exists'.format(field_name))
# Create field
inp_field = ogr.FieldDefn(field_name, ogr_field_type)
input_lyr.CreateField(inp_field)
return inp_field
def Spatial_Reference(epsg, return_string=True):
"""
Obtain a spatial reference from the EPSG parameter
"""
srs = osr.SpatialReference()
srs.ImportFromEPSG(epsg)
if return_string:
return srs.ExportToWkt()
else:
return srs
def List_Datasets(path, ext):
"""
List the data sets in a folder
"""
datsets_ls = []
for f in os.listdir(path):
if os.path.splitext(f)[1][1:] == ext:
datsets_ls.append(f)
return datsets_ls
def NetCDF_to_Raster(input_nc, output_tiff, ras_variable,
x_variable='longitude', y_variable='latitude',
crs={'variable': 'crs', 'wkt': 'crs_wkt'}, time=None):
"""
Extract a layer from a netCDF file and save it as a raster file.
For temporal netcdf files, use the 'time' parameter as:
t = {'variable': 'time_variable', 'value': '30/06/2017'}
"""
# Input
inp_nc = netCDF4.Dataset(input_nc, 'r')
inp_values = inp_nc.variables[ras_variable]
x_index = inp_values.dimensions.index(x_variable)
y_index = inp_values.dimensions.index(y_variable)
if not time:
inp_array = inp_values[:]
else:
time_variable = time['variable']
time_value = time['value']
t_index = inp_values.dimensions.index(time_variable)
time_index = list(inp_nc.variables[time_variable][:]).index(time_value)
if t_index == 0:
inp_array = inp_values[time_index, :, :]
elif t_index == 1:
inp_array = inp_values[:, time_index, :]
elif t_index == 2:
inp_array = inp_values[:, :, time_index]
else:
raise Exception("The array has more dimensions than expected")
# Transpose array if necessary
if y_index > x_index:
inp_array = pd.np.transpose(inp_array)
# Additional parameters
gdal_datatype = gdaltype_from_dtype(inp_array.dtype)
NoData_value = inp_nc.variables[ras_variable]._FillValue
if type(crs) == str:
srs_wkt = crs
else:
crs_variable = crs['variable']
crs_wkt = crs['wkt']
        # getattr avoids the fragile exec-based attribute lookup
        srs_wkt = str(getattr(inp_nc.variables[crs_variable], crs_wkt))
inp_x = inp_nc.variables[x_variable]
inp_y = inp_nc.variables[y_variable]
cellsize_x = abs(pd.np.mean([inp_x[i] - inp_x[i-1]
for i in range(1, len(inp_x))]))
cellsize_y = -abs(pd.np.mean([inp_y[i] - inp_y[i-1]
for i in range(1, len(inp_y))]))
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
y_ncells, x_ncells = inp_array.shape
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, gdal_datatype)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(pd.np.asscalar(NoData_value))
out_top_left_x = inp_x[0] - cellsize_x/2.0
if inp_y[-1] > inp_y[0]:
out_top_left_y = inp_y[-1] - cellsize_y/2.0
inp_array = pd.np.flipud(inp_array)
else:
out_top_left_y = inp_y[0] - cellsize_y/2.0
out_source.SetGeoTransform((out_top_left_x, cellsize_x, 0,
out_top_left_y, 0, cellsize_y))
out_source.SetProjection(srs_wkt)
out_band.WriteArray(inp_array)
out_band.ComputeStatistics(True)
# Save and/or close the data sources
inp_nc.close()
out_source = None
# Return
return output_tiff
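# A minimal usage sketch (the netCDF file, variable name and date are hypothetical;
# only the call signature and the shape of the 'time' dict come from NetCDF_to_Raster above):
#   NetCDF_to_Raster('precipitation.nc', 'precipitation_20170630.tif', 'P',
#                    x_variable='longitude', y_variable='latitude',
#                    time={'variable': 'time', 'value': '30/06/2017'})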
def Apply_Filter(input_tiff, output_tiff, number_of_passes):
"""
Smooth a raster by replacing cell value by the average value of the
surrounding cells
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(1)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
top_left_x = inp_transform[0]
cellsize_x = inp_transform[1]
rot_1 = inp_transform[2]
top_left_y = inp_transform[3]
rot_2 = inp_transform[4]
cellsize_y = inp_transform[5]
NoData_value = inp_band.GetNoDataValue()
x_ncells = inp_lyr.RasterXSize
y_ncells = inp_lyr.RasterYSize
# Filter
inp_array[inp_array == NoData_value] = pd.np.nan
out_array = array_filter(inp_array, number_of_passes)
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform((top_left_x, cellsize_x, rot_1,
top_left_y, rot_2, cellsize_y))
out_source.SetProjection(inp_srs)
out_band.WriteArray(out_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Extract_Band(input_tiff, output_tiff, band_number=1):
"""
Extract and save a raster band into a new raster
"""
# Input
inp_lyr = gdal.Open(input_tiff)
inp_srs = inp_lyr.GetProjection()
inp_transform = inp_lyr.GetGeoTransform()
inp_band = inp_lyr.GetRasterBand(band_number)
inp_array = inp_band.ReadAsArray()
inp_data_type = inp_band.DataType
NoData_value = inp_band.GetNoDataValue()
x_ncells = inp_lyr.RasterXSize
y_ncells = inp_lyr.RasterYSize
# Output
out_driver = gdal.GetDriverByName('GTiff')
if os.path.exists(output_tiff):
out_driver.Delete(output_tiff)
out_source = out_driver.Create(output_tiff, x_ncells, y_ncells,
1, inp_data_type)
out_band = out_source.GetRasterBand(1)
out_band.SetNoDataValue(NoData_value)
out_source.SetGeoTransform(inp_transform)
out_source.SetProjection(inp_srs)
out_band.WriteArray(inp_array)
# Save and/or close the data sources
inp_lyr = None
out_source = None
# Return
return output_tiff
def Get_Extent(input_lyr):
"""
Obtain the input layer extent (xmin, ymin, xmax, ymax)
"""
# Input
filename, ext = os.path.splitext(input_lyr)
if ext.lower() == '.shp':
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_lyr)
inp_lyr = inp_source.GetLayer()
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
inp_lyr = None
inp_source = None
elif ext.lower() == '.tif':
inp_lyr = gdal.Open(input_lyr)
inp_transform = inp_lyr.GetGeoTransform()
x_min = inp_transform[0]
x_max = x_min + inp_transform[1] * inp_lyr.RasterXSize
y_max = inp_transform[3]
y_min = y_max + inp_transform[5] * inp_lyr.RasterYSize
inp_lyr = None
else:
raise Exception('The input data type is not recognized')
return (x_min, y_min, x_max, y_max)
def Interpolation_Default(input_shp, field_name, output_tiff,
method='nearest', cellsize=None):
'''
Interpolate point data into a raster
Available methods: 'nearest', 'linear', 'cubic'
'''
# Input
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
inp_wkt = inp_srs.ExportToWkt()
# Extent
x_min, x_max, y_min, y_max = inp_lyr.GetExtent()
ll_corner = [x_min, y_min]
if not cellsize:
cellsize = min(x_max - x_min, y_max - y_min)/25.0
x_ncells = int((x_max - x_min) / cellsize)
y_ncells = int((y_max - y_min) / cellsize)
# Feature points
x = []
y = []
z = []
for i in range(inp_lyr.GetFeatureCount()):
feature_inp = inp_lyr.GetNextFeature()
point_inp = feature_inp.geometry().GetPoint()
x.append(point_inp[0])
y.append(point_inp[1])
z.append(feature_inp.GetField(field_name))
x = pd.np.array(x)
y = pd.np.array(y)
z = pd.np.array(z)
# Grid
X, Y = pd.np.meshgrid(pd.np.linspace(x_min + cellsize/2.0,
x_max - cellsize/2.0,
x_ncells),
pd.np.linspace(y_min + cellsize/2.0,
y_max - cellsize/2.0,
y_ncells))
# Interpolate
out_array = griddata((x, y), z, (X, Y), method=method)
out_array = pd.np.flipud(out_array)
# Save raster
Array_to_Raster(out_array, output_tiff, ll_corner, cellsize, inp_wkt)
# Return
return output_tiff
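# A minimal usage sketch (shapefile, field and output names are hypothetical;
# 'nearest', 'linear' and 'cubic' are the scipy griddata methods mentioned above):
#   Interpolation_Default('stations.shp', 'RAINFALL', 'rainfall.tif',
#                         method='linear', cellsize=100.0)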
def Kriging_Interpolation_Points(input_shp, field_name, output_tiff, cellsize,
bbox=None):
"""
Interpolate point data using Ordinary Kriging
Reference: https://cran.r-project.org/web/packages/automap/automap.pdf
"""
# Spatial reference
inp_driver = ogr.GetDriverByName('ESRI Shapefile')
inp_source = inp_driver.Open(input_shp, 0)
inp_lyr = inp_source.GetLayer()
inp_srs = inp_lyr.GetSpatialRef()
srs_wkt = inp_srs.ExportToWkt()
inp_source = None
# Temp folder
temp_dir = tempfile.mkdtemp()
temp_points_tiff = os.path.join(temp_dir, 'points_ras.tif')
# Points to raster
Feature_to_Raster(input_shp, temp_points_tiff,
cellsize, field_name, -9999)
# Raster extent
if bbox:
xmin, ymin, xmax, ymax = bbox
ll_corner = [xmin, ymin]
x_ncells = int(math.ceil((xmax - xmin)/cellsize))
y_ncells = int(math.ceil((ymax - ymin)/cellsize))
else:
        # gdal datasets have no GetExtent(); derive the extent from the geotransform
        temp_lyr = gdal.Open(temp_points_tiff)
        temp_transform = temp_lyr.GetGeoTransform()
        x_ncells = temp_lyr.RasterXSize
        y_ncells = temp_lyr.RasterYSize
        xmin = temp_transform[0]
        ymax = temp_transform[3]
        xmax = xmin + temp_transform[1] * x_ncells
        ymin = ymax + temp_transform[5] * y_ncells
        ll_corner = [xmin, ymin]
        temp_lyr = None
# Raster to array
points_array = Raster_to_Array(temp_points_tiff, ll_corner,
x_ncells, y_ncells, values_type='float32')
# Run kriging
x_vector = np.arange(xmin + cellsize/2, xmax + cellsize/2, cellsize)
y_vector = np.arange(ymin + cellsize/2, ymax + cellsize/2, cellsize)
out_array = Kriging_Interpolation_Array(points_array, x_vector, y_vector)
# Save array as raster
Array_to_Raster(out_array, output_tiff, ll_corner, cellsize, srs_wkt)
# Return
return output_tiff
def Kriging_Interpolation_Array(input_array, x_vector, y_vector):
"""
Interpolate data in an array using Ordinary Kriging
Reference: https://cran.r-project.org/web/packages/automap/automap.pdf
"""
import rpy2.robjects as robjects
from rpy2.robjects import pandas2ri
# Total values in array
n_values = np.isfinite(input_array).sum()
# Load function
pandas2ri.activate()
robjects.r('''
library(gstat)
library(sp)
library(automap)
kriging_interpolation <- function(x_vec, y_vec, values_arr,
n_values){
# Parameters
shape <- dim(values_arr)
counter <- 1
df <- data.frame(X=numeric(n_values),
Y=numeric(n_values),
INFZ=numeric(n_values))
# Save values into a data frame
for (i in seq(shape[2])) {
for (j in seq(shape[1])) {
if (is.finite(values_arr[j, i])) {
df[counter,] <- c(x_vec[i], y_vec[j], values_arr[j, i])
counter <- counter + 1
}
}
}
# Grid
coordinates(df) = ~X+Y
int_grid <- expand.grid(x_vec, y_vec)
names(int_grid) <- c("X", "Y")
coordinates(int_grid) = ~X+Y
gridded(int_grid) = TRUE
# Kriging
krig_output <- autoKrige(INFZ~1, df, int_grid)
# Array
values_out <- matrix(krig_output$krige_output$var1.pred,
nrow=length(y_vec),
ncol=length(x_vec),
byrow = TRUE)
return(values_out)
}
''')
kriging_interpolation = robjects.r['kriging_interpolation']
# Execute kriging function and get array
r_array = kriging_interpolation(x_vector, y_vector, input_array, n_values)
array_out = np.array(r_array)
# Return
return array_out
def get_neighbors(x, y, nx, ny, cells=1):
"""
Get a list of neighboring cells
"""
neighbors_ls = [(xi, yi)
for xi in range(x - 1 - cells + 1, x + 2 + cells - 1)
for yi in range(y - 1 - cells + 1, y + 2 + cells - 1)
if (-1 < x <= nx - 1 and -1 < y <= ny - 1 and
(x != xi or y != yi) and
(0 <= xi <= nx - 1) and (0 <= yi <= ny - 1))]
return neighbors_ls
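# Example (a sketch of the expected output): on a 5x5 grid, get_neighbors(2, 2, 5, 5)
# returns the 8 cells surrounding (2, 2); at a corner, get_neighbors(0, 0, 5, 5)
# returns only the in-bounds neighbours (0, 1), (1, 0) and (1, 1).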
def get_mean_neighbors(array, index, include_cell=False):
"""
Get the mean value of neighboring cells
"""
xi, yi = index
nx, ny = array.shape
stay = True
cells = 1
while stay:
neighbors_ls = get_neighbors(xi, yi, nx, ny, cells)
if include_cell:
neighbors_ls = neighbors_ls + [(xi, yi)]
values_ls = [array[i] for i in neighbors_ls]
if pd.np.isnan(values_ls).all():
cells += 1
else:
value = pd.np.nanmean(values_ls)
stay = False
return value
def array_filter(array, number_of_passes=1):
"""
Smooth cell values by replacing each cell value by the average value of the
surrounding cells
"""
while number_of_passes >= 1:
ny, nx = array.shape
arrayf = pd.np.empty(array.shape)
arrayf[:] = pd.np.nan
for j in range(ny):
for i in range(nx):
arrayf[j, i] = get_mean_neighbors(array, (j, i), True)
array[:] = arrayf[:]
number_of_passes -= 1
return arrayf
def ogrtype_from_dtype(d_type):
"""
Return the ogr data type from the numpy dtype
"""
# ogr field type
if 'float' in d_type.name:
ogr_data_type = 2
elif 'int' in d_type.name:
ogr_data_type = 0
elif 'string' in d_type.name:
ogr_data_type = 4
elif 'bool' in d_type.name:
ogr_data_type = 8
else:
raise Exception('"{0}" is not recognized'.format(d_type))
return ogr_data_type
def gdaltype_from_dtype(d_type):
"""
Return the gdal data type from the numpy dtype
"""
# gdal field type
if 'int8' == d_type.name:
gdal_data_type = 1
elif 'uint16' == d_type.name:
gdal_data_type = 2
elif 'int16' == d_type.name:
gdal_data_type = 3
elif 'uint32' == d_type.name:
gdal_data_type = 4
elif 'int32' == d_type.name:
gdal_data_type = 5
elif 'float32' == d_type.name:
gdal_data_type = 6
elif 'float64' == d_type.name:
gdal_data_type = 7
elif 'bool' in d_type.name:
gdal_data_type = 1
elif 'int' in d_type.name:
gdal_data_type = 5
elif 'float' in d_type.name:
gdal_data_type = 7
elif 'complex' == d_type.name:
gdal_data_type = 11
else:
warnings.warn('"{0}" is not recognized. '
'"Unknown" data type used'.format(d_type))
gdal_data_type = 0
return gdal_data_type
| wateraccounting/SEBAL | hants_old/wa_gdal/davgis/functions.py | Python | apache-2.0 | 29,458 | ["NetCDF"] | 167dbb923464018cd8e8a8115a11fe21d291f51214b7563cda07acacf33cf893 |
""" ProxyManager is the implementation of the ProxyManagement service
in the DISET framework
"""
import types
import os
from DIRAC import gLogger, S_OK, S_ERROR, gConfig, rootPath
from DIRAC.Core.DISET.RequestHandler import RequestHandler
from DIRAC.Core.Utilities.File import mkDir
from DIRAC.FrameworkSystem.private.SecurityFileLog import SecurityFileLog
from DIRAC.FrameworkSystem.Client.SecurityLogClient import SecurityLogClient
__RCSID__ = "$Id$"
gSecurityFileLog = False
def initializeSecurityLoggingHandler( serviceInfo ):
global gSecurityFileLog
serviceCS = serviceInfo [ 'serviceSectionPath' ]
dataPath = gConfig.getValue( "%s/DataLocation" % serviceCS, "data/securityLog" )
dataPath = dataPath.strip()
if "/" != dataPath[0]:
dataPath = os.path.realpath( "%s/%s" % ( gConfig.getValue( '/LocalSite/InstancePath', rootPath ), dataPath ) )
gLogger.info( "Data will be written into %s" % dataPath )
mkDir( dataPath )
try:
testFile = "%s/seclog.jarl.test" % dataPath
fd = file( testFile, "w" )
fd.close()
os.unlink( testFile )
except IOError:
gLogger.fatal( "Can't write to %s" % dataPath )
return S_ERROR( "Data location is not writable" )
#Define globals
gSecurityFileLog = SecurityFileLog( dataPath )
SecurityLogClient().setLogStore( gSecurityFileLog )
return S_OK()
class SecurityLoggingHandler( RequestHandler ):
types_logAction = [ ( types.ListType, types.TupleType ) ]
def export_logAction( self, secMsg ):
""" Log a single action
"""
result = gSecurityFileLog.logAction( secMsg )
if not result[ 'OK' ]:
return S_OK( [ ( secMsg, result[ 'Message' ] ) ] )
return S_OK()
types_logActionBundle = [ ( types.ListType, types.TupleType ) ]
def export_logActionBundle( self, secMsgList ):
""" Log a list of actions
"""
errorList = []
for secMsg in secMsgList:
result = gSecurityFileLog.logAction( secMsg )
if not result[ 'OK' ]:
errorList.append( ( secMsg, result[ 'Message' ] ) )
if errorList:
return S_OK( errorList )
return S_OK()
| andresailer/DIRAC | FrameworkSystem/Service/SecurityLoggingHandler.py | Python | gpl-3.0 | 2,090 | ["DIRAC"] | 2e5140628ced611804a39968e16e1f106bb7d2376e68901ae28554c8874a133c |
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8 :
# vim: set foldmethod=marker commentstring=\ \ #\ %s :
#
# Author: Taishi Matsumura
# Created: 2016-06-29
#
# Copyright (C) 2015 Taishi Matsumura
#
from pylab import *
from matplotlib.widgets import Slider  # Slider is not pulled in by the pylab star import
close('all')
class Neuron(object):
def __init__(self, N, dt):
self.N = N
self.t = 0.0
self.dt = dt
self.C = 1.5
self.gl = 0.5
self.gNa = 52.0
self.gK = 11.0
self.Vl = 0.0
self.VNa = 55.0
self.VK = -90.0
V = -60.0 * ones(N)
m = 0.0 * ones(N)
h = 0.0 * ones(N)
n = 0.0 * ones(N)
self.x_now = vstack((V, m, h, n))
def NeuronDerivs(self, t, x, I):
V, m, h, n = x
dxdt_V = (
- self.gl * (V - self.Vl)
- self.gNa * m ** 3 * h * (V - self.VNa)
- self.gK * n ** 4 * (V - self.VK)
+ I) / self.C
alpha_m = -0.1 * (V + 23.0) / (exp(-0.1 * (V + 23.0)) - 1.0)
beta_m = 4.0 * exp(-(V + 48.0) / 18.0)
m_inf = alpha_m / (alpha_m + beta_m)
tau_m = 1.0 / (alpha_m + beta_m)
alpha_h = 0.07 * exp(-(V + 37.0) / 20.0)
beta_h = 1.0 / (exp(-0.1 * (V + 7.0)) + 1.0)
h_inf = alpha_h / (alpha_h + beta_h)
tau_h = 1.0 / (alpha_h + beta_h)
alpha_n = -0.01 * (V + 27.0) / (exp(-0.1 * (V + 27.0)) - 1.0)
beta_n = 0.125 * exp(-(V + 37.0) / 80.0)
n_inf = alpha_n / (alpha_n + beta_n)
tau_n = 1.0 / (alpha_n + beta_n)
infs = vstack((m_inf, h_inf, n_inf))
taus = vstack((tau_m, tau_h, tau_n))
gates = vstack((m, h, n))
dxdt_ch = (infs - gates) / taus
self.dxdt = vstack((dxdt_V, dxdt_ch))
return self.dxdt
def RungeKutta4(self, t, x, I):
k1 = self.NeuronDerivs(t, x, I)
k2 = self.NeuronDerivs(t + 0.5 * self.dt, x + 0.5 * k1 * self.dt, I)
k3 = self.NeuronDerivs(t + 0.5 * self.dt, x + 0.5 * k2 * self.dt, I)
k4 = self.NeuronDerivs(t + self.dt, x + k3 * self.dt, I)
dx = (k1 + 2.0 * k2 + 2.0 * k3 + k4) * self.dt / 6.0
x_new = x + dx
return x_new
def update(self, I):
self.t += self.dt
self.x_now = self.RungeKutta4(self.t, self.x_now, I)
return self.x_now
# ----------------------------------------------------------------------------
# Neuron parameters
# ----------------------------------------------------------------------------
I_step_ini = -20.0
I_step_min = -30.0
I_step_max = 30.0
dt = 0.1
neuron = Neuron(1, dt)
t_now = 0.0
x_now = neuron.x_now
X = array([[t_now] * neuron.N])
V = x_now[0:1]
Gates = x_now[1:2]
I_step = [I_step_ini]
# ----------------------------------------------------------------------------
# Figure initialization
# ----------------------------------------------------------------------------
time_window = 1000
fig = figure()
subplots_adjust(left=0.15, bottom=0.25)
ax = fig.add_subplot(211)
ax.set_ylim(-80, 50)
ax.set_xlim(0, time_window)
ax.set_ylabel('Membrane potential [mV]')
ax.set_xlabel('Time [msec]')
lines, = ax.plot(X, V)
ax1 = fig.add_subplot(413)
ax1.set_ylim(-0.1, 1.1)
ax1.set_xlim(0, time_window)
ax1.set_ylabel('Gate variables [-]')
ax1.set_xlabel('Time [msec]')
lines1, = ax1.plot(X, Gates)
ax2 = fig.add_subplot(414)
ax2.set_ylim(I_step_min - 5, I_step_max + 5)
ax2.axhline(I_step_ini, ls='--', c='red')
ax2.set_xlim(0, time_window)
ax2.set_ylabel('I_step [uA]')
ax2.set_xlabel('Time [msec]')
lines2, = ax2.plot(X, I_step)
ax_I_step = axes([0.15, 0.10, 0.65, 0.03])
slider_I_step = Slider(
ax_I_step, 'I_step', I_step_min, I_step_max, valinit=I_step_ini)
# ----------------------------------------------------------------------------
# Main loop
# ----------------------------------------------------------------------------
while True:
I = slider_I_step.val
t_now = t_now + dt
x_now = neuron.update(I)
# ----------------------------------------------------------------------------
# Plot part
# ----------------------------------------------------------------------------
if max(X) < time_window:
X = append(X, t_now)
V = append(V, x_now[0:1])
Gates = append(Gates, x_now[1:2])
I_step.append(slider_I_step.val)
lines.set_data(X, V)
lines1.set_data(X, Gates)
lines2.set_data(X, I_step)
pause(0.01)
else:
X += dt
V = append(V[1:], x_now[0:1])
Gates = append(Gates[1:], x_now[1:2])
I_step.append(slider_I_step.val)
I_step.pop(0)
lines.set_data(X, V)
lines1.set_data(X, Gates)
lines2.set_data(X, I_step)
ax.set_xlim((X.min(), X.max()))
ax1.set_xlim((X.min(), X.max()))
ax2.set_xlim((X.min(), X.max()))
pause(0.01)
| matsu490/RealtimeSimulation | old/realtimeHH.py | Python | mit | 4,791 | ["NEURON"] | 1844f4ae16975bf25d6bdd1a1f1eac2964ddc207ff403f79e02d84f7f2c49877 |
trace = lambda x: None # or print
visit = lambda x: print(x, end=', ')
# breadth-first by items: add to end
def sumtree(L): # Breadth-first, explicit queue
tot = 0
items = list(L) # Start with copy of top level
while items:
trace(items)
front = items.pop(0) # Fetch/delete front item
if not isinstance(front, list):
tot += front # Add numbers directly
visit(front)
else:
items.extend(front) # <== Append all in nested list
return tot
L = [1, [2, [3, 4], 5], 6, [7, 8]] # Arbitrary nesting
print(sumtree(L)) # Prints 36
# Pathological cases
print(sumtree([1, [2, [3, [4, [5]]]]])) # Prints 15 (right-heavy)
print(sumtree([[[[[1], 2], 3], 4], 5])) # Prints 15 (left-heavy)
print('-'*40)
# depth-first by items: add to front (like recursive calls version)
def sumtree(L): # Depth-first, explicit stack
tot = 0
items = list(L) # Start with copy of top level
while items:
trace(items)
front = items.pop(0) # Fetch/delete front item
if not isinstance(front, list):
tot += front # Add numbers directly
visit(front)
else:
items[:0] = front # <== Prepend all in nested list
return tot
L = [1, [2, [3, 4], 5], 6, [7, 8]] # Arbitrary nesting
print(sumtree(L)) # Prints 36
# Pathological cases
print(sumtree([1, [2, [3, [4, [5]]]]])) # Prints 15 (right-heavy)
print(sumtree([[[[[1], 2], 3], 4], 5])) # Prints 15 (left-heavy)
print('-'*40)
# Breadth-first by levels
def sumtree(L):
tot = 0
levels = [L]
while levels:
trace(levels)
front = levels.pop(0) # Fetch/delete front path
for x in front:
if not isinstance(x, list):
tot += x # Add numbers directly
visit(x)
else:
levels.append(x) # Push/schedule nested lists
return tot
L = [1, [2, [3, 4], 5], 6, [7, 8]] # Arbitrary nesting
print(sumtree(L)) # Prints 36
# Pathological cases
print(sumtree([1, [2, [3, [4, [5]]]]])) # Prints 15 (right-heavy)
print(sumtree([[[[[1], 2], 3], 4], 5])) # Prints 15 (left-heavy)
print('-'*40)
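# For reference, the recursive traversal that the depth-first/stack version above mimics
# (a sketch, not part of the original script):
#
#   def sumtree(L):
#       tot = 0
#       for x in L:
#           if not isinstance(x, list):
#               tot += x           # Add numbers directly
#           else:
#               tot += sumtree(x)  # Recurse into nested lists
#       return tot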
| simontakite/sysadmin | pythonscripts/learningPython/sumtree2.py | Python | gpl-2.0 | 2,753 | ["VisIt"] | e80917415ac66b2c701df46177b753a012b21a1d35bf4724098ed9f47a28a4eb |
# -*- coding: utf-8 -*-
"""rallpacks_cable_hhchannel.py:
A cable with 1000 compartments with HH-type channels in it.
Last modified: Wed May 21, 2014 09:51AM
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2013, NCBS Bangalore"
__credits__ = ["NCBS Bangalore", "Bhalla Lab"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import moose
from moose import utils
import time
import os
import numpy as np
import matplotlib.pyplot as plt
import compartment as comp
EREST_ACT = -65e-3
per_ms = 1e3
dt = 5e-5
cable = []
def alphaM(A, B, V0, v):
    '''Compute alpha_m at point v
    alpha_m = A*(v - V0) / (exp((v - V0)/B) - 1)
    '''
return (A*(v-V0) / (np.exp((v - V0)/B) -1 ))
def alphaN(A, B, V0, v):
    '''Compute alpha_n at point v
    alpha_n = A*(v - V0) / (exp((v - V0)/B) - 1)
    '''
return alphaM(A, B, V0, v)
def betaM(A, B, V0, v):
'''Compute beta_m at point v
'''
return (A * np.exp((v-V0)/B))
def betaN(A, B, V0, v):
return betaM(A, B, V0, v)
def alphaH(A, B, V0, v):
'''Compute alpha_h at point v
'''
return (A * np.exp(( v - V0) / B))
def betaH(A, B, V0, v):
'''Compute beta_h at point v
'''
return (A * np.exp((v-V0)/B) + 1)
def createChannel(species, path, **kwargs):
"""Create a channel """
if species == 'na':
return sodiumChannel( path, **kwargs)
elif species == 'ca':
channel.Xpower = 4
else:
utils.dump("FATAL", "Unsupported channel type: {}".format(species))
        raise RuntimeError("Unsupported species of channel")
def create_na_chan(parent='/library', name='na', vmin=-110e-3, vmax=50e-3, vdivs=3000):
"""Create a Hodhkin-Huxley Na channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
na = moose.HHChannel('%s/%s' % (parent, name))
na.Xpower = 3
na.Ypower = 1
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
m_alpha = per_ms * (25 - v * 1e3) / (10 * (np.exp((25 - v * 1e3) / 10) - 1))
m_beta = per_ms * 4 * np.exp(- v * 1e3/ 18)
m_gate = moose.element('%s/gateX' % (na.path))
m_gate.min = vmin
m_gate.max = vmax
m_gate.divs = vdivs
m_gate.tableA = m_alpha
m_gate.tableB = m_alpha + m_beta
h_alpha = per_ms * 0.07 * np.exp(-v / 20e-3)
h_beta = per_ms * 1/(np.exp((30e-3 - v) / 10e-3) + 1)
h_gate = moose.element('%s/gateY' % (na.path))
h_gate.min = vmin
h_gate.max = vmax
h_gate.divs = vdivs
h_gate.tableA = h_alpha
h_gate.tableB = h_alpha + h_beta
return na
def create_k_chan(parent='/library', name='k', vmin=-120e-3, vmax=40e-3, vdivs=3000):
"""Create a Hodhkin-Huxley K channel under `parent`.
vmin, vmax, vdivs: voltage range and number of divisions for gate tables
"""
k = moose.HHChannel('%s/%s' % (parent, name))
k.Xpower = 4
v = np.linspace(vmin, vmax, vdivs+1) - EREST_ACT
n_alpha = per_ms * (10 - v * 1e3)/(100 * (np.exp((10 - v * 1e3)/10) - 1))
n_beta = per_ms * 0.125 * np.exp(- v * 1e3 / 80)
n_gate = moose.element('%s/gateX' % (k.path))
n_gate.min = vmin
n_gate.max = vmax
n_gate.divs = vdivs
n_gate.tableA = n_alpha
n_gate.tableB = n_alpha + n_beta
return k
def createHHComp(parent='/library', name='hhcomp', diameter=1e-6, length=1e-6):
"""Create a compartment with Hodgkin-Huxley type ion channels (Na and
K).
Returns a 3-tuple: (compartment, nachannel, kchannel)
"""
compPath = '{}/{}'.format(parent, name)
mc = comp.MooseCompartment( compPath, length, diameter, {})
c = mc.mc_
sarea = mc.surfaceArea
if moose.exists('/library/na'):
moose.copy('/library/na', c.path, 'na')
else:
create_na_chan(parent = c.path)
na = moose.element('%s/na' % (c.path))
# Na-conductance 120 mS/cm^2
na.Gbar = 120e-3 * sarea * 1e4
na.Ek = 115e-3 + EREST_ACT
moose.connect(c, 'channel', na, 'channel')
if moose.exists('/library/k'):
moose.copy('/library/k', c.path, 'k')
else:
create_k_chan(parent = c.path)
k = moose.element('%s/k' % (c.path))
# K-conductance 36 mS/cm^2
k.Gbar = 36e-3 * sarea * 1e4
k.Ek = -12e-3 + EREST_ACT
moose.connect(c, 'channel', k, 'channel')
return (c, na, k)
def makeCable(args):
global cable
ncomp = args['ncomp']
moose.Neutral('/cable')
for i in range( ncomp ):
compName = 'hhcomp{}'.format(i)
        hhComp = createHHComp( '/cable', compName )
cable.append( hhComp[0] )
# connect the cable.
for i, hhc in enumerate(cable[0:-1]):
hhc.connect('axial', cable[i+1], 'raxial')
def setupDUT( dt ):
global cable
comp = cable[0]
data = moose.Neutral('/data')
pg = moose.PulseGen('/data/pg')
pg.firstWidth = 25e-3
pg.firstLevel = 1e-10
moose.connect(pg, 'output', comp, 'injectMsg')
setupClocks( dt )
def setupClocks( dt ):
moose.setClock(0, dt)
moose.setClock(1, dt)
def setupSolver( hsolveDt ):
hsolvePath = '/hsolve'
hsolve = moose.HSolve( hsolvePath )
hsolve.dt = hsolveDt
hsolve.target = '/cable'
moose.useClock(1, hsolvePath, 'process')
def simulate( runTime, dt):
""" Simulate the cable """
moose.reinit()
setupSolver( hsolveDt = dt )
moose.start( runTime )
def main(args):
global cable
dt = args['dt']
makeCable(args)
setupDUT( dt )
t = time.time()
simulate( args['run_time'], dt )
print( 'Time to run %f seconds ' % ( time.time() - t ) )
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(
        description = 'Rallpack 3: a cable of n compartments with HH channels'
)
parser.add_argument( '--tau'
, default = 0.04
, type = float
, help = 'Time constant of membrane'
)
parser.add_argument( '--run_time'
, default = 0.25
, type = float
, help = 'Simulation run time'
)
parser.add_argument( '--dt'
, default = 5e-5
, type = float
, help = 'Step time during simulation'
)
parser.add_argument( '--Em'
, default = -65e-3
, type = float
, help = 'Resting potential of membrane'
)
parser.add_argument( '--RA'
, default = 1.0
, type = float
, help = 'Axial resistivity'
)
parser.add_argument( '--lambda'
, default = 1e-3
, type = float
        , help = 'Space constant (lambda) of the cable'
)
parser.add_argument( '--x'
, default = 1e-3
, type = float
        , help = 'Position along the cable at which membrane potential is recorded'
)
parser.add_argument( '--length'
, default = 1e-3
, type = float
, help = 'Length of the cable'
)
parser.add_argument( '--diameter'
, default = 1e-6
, type = float
, help = 'Diameter of cable'
)
parser.add_argument( '--inj'
, default = 1e-10
, type = float
, help = 'Current injected at one end of the cable'
)
parser.add_argument( '--ncomp'
, default = 1000
, type = int
        , help = 'Number of compartments in the cable'
)
parser.add_argument( '--output'
, default = None
, type = str
, help = 'Store simulation results to this file'
)
args = parser.parse_args()
main( vars(args) )
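# Example invocation (a sketch; the values shown are simply the defaults declared above):
#   python rallpacks_cable_hhchannel.py --ncomp 1000 --dt 5e-5 --run_time 0.25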
| dharmasam9/moose-core | tests/python/Rallpacks/rallpacks_cable_hhchannel.py | Python | gpl-3.0 | 7,771 | ["MOOSE"] | d22b26e8be87f4ca6cfdd79f4d866361bdc9df2c72d408246b8c4d76a1c898ea |
import pytest
from pkg_resources import resource_filename
from berny import Berny, geomlib, optimize
from berny.solvers import MopacSolver
@pytest.fixture(scope='session')
def mopac():
    return MopacSolver()
def ethanol():
return geomlib.readfile(resource_filename('tests', 'ethanol.xyz')), 5
def aniline():
return geomlib.readfile(resource_filename('tests', 'aniline.xyz')), 11
def cyanogen():
return geomlib.readfile(resource_filename('tests', 'cyanogen.xyz')), 4
def water():
return geomlib.readfile(resource_filename('tests', 'water.xyz')), 7
@pytest.mark.parametrize('test_case', [ethanol, aniline, cyanogen, water])
def test_optimize(mopac, test_case):
geom, n_ref = test_case()
berny = Berny(geom)
optimize(berny, mopac)
assert berny.converged
assert berny._n == n_ref
| azag0/pyberny | tests/test_optimize.py | Python | mpl-2.0 | 825 | ["MOPAC"] | 49baea9e0e531127143fbed9d6a2c3b193182f4bc6255f6532521a83086c43d5 |
# -*- coding: iso-8859-1 -*-
# Copyright (C) 2007-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Author : Anthony Geay
from MEDLoader import *
""" This test generate a simple multi time field with a very aggressive time steps triplets. Neither dt, nor iteration nor order is considered.
In this case only the rank is considered.
"""
fname="testMEDReader7.med"
outImgName="testMEDReader7.png"
#########
arr=DataArrayDouble([(0,0,0),(1,0,0),(2,0,0),(3,0,0),(0,1,0),(1,1,0),(2,1,0),(3,1,0),(0,2,0),(1,2,0),(2,2,0),(3,2,0),(0,3,0),(1,3,0),(2,3,0),(3,3,0)])
m0=MEDCouplingUMesh("mesh",2) ; m0.setCoords(arr) ; m0.allocateCells()
for elt in [[2,3,6],[3,7,6],[6,9,5],[6,10,9]]:
m0.insertNextCell(NORM_TRI3,elt)
pass
for elt in [[0,4,5,1],[5,6,2,1],[4,8,9,5],[6,10,11,7],[8,12,13,9],[9,13,14,10],[10,14,15,11]]:
m0.insertNextCell(NORM_QUAD4,elt)
pass
mm=MEDFileUMesh()
mm.setMeshAtLevel(0,m0)
grp0=DataArrayInt([0,1,4,5,7,10]) ; grp0.setName("grp0")
mm.setGroupsAtLevel(0,[grp0])
fmts=MEDFileFieldMultiTS()
#
fNode=MEDCouplingFieldDouble(ON_NODES) ; fNode.setName("fNode")
fNode.setMesh(m0)
fNode.setArray(DataArrayDouble([3,2,1,0,3.16,2.23,1.41,1,3.6,2.82,2.23,2,4.24,3.6,3.16,3]))
fNode.getArray().setInfoOnComponent(0,"C0")
fNode.setTime(0.5,1,1)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
fNode.getArray().reverse()
fNode.setTime(0.5,1,2)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
fNode.getArray().reverse()
fNode.setTime(0.5,2,1)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
fNode.getArray().reverse()
fNode.setTime(0.5,2,2)
f1ts=MEDFileField1TS() ; f1ts.setFieldNoProfileSBT(fNode) ; fmts.pushBackTimeStep(f1ts)
#
mm.write(fname,2)
fmts.write(fname,0)
################### MED write is done -> Go to MEDReader
from paraview.simple import *
myMedReader=MEDReader(FileName=fname)
myMedReader.AllArrays = ['TS0/mesh/ComSup0/fNode@@][@@P1']
assert(list(myMedReader.TimestepValues)==[0.,1.,2.,3.])
RenderView1 = GetRenderView()
RenderView1.CameraFocalPoint = [1.5, 1.5, 0.0]
RenderView1.CameraPosition = [1.5, 1.5, 10000.0]
RenderView1.InteractionMode = '3D'
RenderView1.CameraPosition = [1.5, 1.5, 8.196152422706632]
RenderView1.CameraClippingRange = [7.825640906782493, 8.682319698595558]
RenderView1.CameraParallelScale = 2.1213203435596424
RenderView1.CenterOfRotation = [1.5, 1.5, 0.0]
DataRepresentation4 = Show()
DataRepresentation4.EdgeColor = [0.0, 0.0, 0.5000076295109483]
DataRepresentation4.SelectionPointFieldDataArrayName = 'fNode'
DataRepresentation4.ScaleFactor = 0.3182729169726372
a1_fGauss_PVLookupTable = GetLookupTableForArray( "fNode", 1, RGBPoints=[0.22, 0.23, 0.299, 0.754, 2.95, 0.706, 0.016, 0.15], VectorMode='Magnitude', NanColor=[0.25, 0.0, 0.0], ColorSpace='Diverging', ScalarRangeInitialized=1.0, AllowDuplicateScalars=1 )
a1_fGauss_PiecewiseFunction = CreatePiecewiseFunction( Points=[0.0, 0.0, 0.5, 0.0, 1.0, 1.0, 0.5, 0.0] )
DataRepresentation4.ColorArrayName = 'fNode'
DataRepresentation4.LookupTable = a1_fGauss_PVLookupTable
a1_fGauss_PVLookupTable.ScalarOpacityFunction = a1_fGauss_PiecewiseFunction
RenderView1.ViewTime = 1.0 #### Important # red is in right bottom
RenderView1.CacheKey = 1.0
RenderView1.UseCache = 1
RenderView1.ViewSize=[300,300]
WriteImage(outImgName)
| FedoraScientific/salome-paravis | src/Plugins/MEDReader/Test/testMEDReader7.py | Python | lgpl-2.1 | 4,157 | ["ParaView"] | aa12cdcbb4a96a543122640f280c799f31285e622cf30c2aab20576c585c9337 |
"""
Utility code that provides classes helpful in choosing a suitable TVTK
class. It does this by providing a list of all the classes along with
the option to be able to search for the documentation.
The nice thing about the UI is that it performs some kind of completion
on names typed by the user, plus it allows users to search through the
TVTK class docs very easily. Once a search string is typed the
completion and available lists are modified so you can do completion of
the searched class names. If a unique enough string is typed the class
docs are shown.
"""
# Author: Prabhu Ramachandran <prabhu [at] aero . iitb . ac . in>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import vtk
import types
import inspect
# Enthought library imports.
from traits.api import HasTraits, Property, List, Str, \
Instance, Button, Int
from traitsui.api import View, Group, Item, EnumEditor,\
ListEditor, TextEditor
from tvtk.api import tvtk
from tvtk.common import get_tvtk_name
################################################################################
# Utility functions.
################################################################################
def get_tvtk_class_names():
"""Returns 4 lists:
1. A list of all the TVTK class names that are not abstract.
2. A list of the TVTK sources (have only outputs and no inputs)
3. A list of the TVTK filters (both inputs and outputs)
4. A list of the TVTK sinks (only inputs and no outputs)
"""
    # Shut off VTK warnings for the time being.
o = vtk.vtkObject
w = o.GetGlobalWarningDisplay()
o.SetGlobalWarningDisplay(0) # Turn it off.
all = []
src = []
filter = []
sink = []
for name in dir(vtk):
if name.startswith('vtk') and not name.startswith('vtkQt'):
klass = getattr(vtk, name)
try:
c = klass()
except (TypeError, NotImplementedError):
continue
tvtk_name = get_tvtk_name(name)
all.append(tvtk_name)
has_input = has_output = False
if hasattr(klass, 'GetNumberOfInputPorts'):
if c.GetNumberOfInputPorts() > 0:
has_input = True
if hasattr(klass, 'GetNumberOfOutputPorts'):
if c.GetNumberOfOutputPorts() > 0:
has_output = True
if has_input:
if has_output:
filter.append(tvtk_name)
else:
sink.append(tvtk_name)
elif has_output:
src.append(tvtk_name)
o.SetGlobalWarningDisplay(w)
result = (all, src, filter, sink)
for x in result:
x.sort()
return result
def get_func_doc(func, fname):
"""Returns function documentation."""
if inspect.isfunction(func):
func_obj = func
elif inspect.ismethod(func):
func_obj = func.im_func
else:
return ''
args, vargs, vkw = inspect.getargs(func_obj.func_code)
defaults = func_obj.func_defaults
doc = fname + inspect.formatargspec(args, vargs, vkw, defaults)
d = inspect.getdoc(func)
if d is not None:
doc += '\n\n' + d + '\n\n'
return doc
def get_tvtk_class_doc(obj):
"""Return's the objects documentation."""
doc = obj.__doc__ + '\nTraits:\n-------------------\n\n'
ignore = ['trait_added', 'trait_modified']
for key, trait in obj.traits().iteritems():
if key.startswith('_') or key.endswith('_') or key in ignore:
continue
doc += '\n%s: %s'%(key, trait.help)
doc += '\nMethods:\n----------------------\n\n'
traits = obj.trait_names()
for name in dir(obj):
if name in traits or name.startswith('_'):
continue
if name.find('trait') > -1 and name != 'update_traits':
continue
func = getattr(obj, name)
if callable(func):
doc += '\n' + get_func_doc(func, name)
return doc
# GLOBALS
TVTK_CLASSES, TVTK_SOURCES, TVTK_FILTERS, TVTK_SINKS = get_tvtk_class_names()
################################################################################
# `DocSearch` class.
################################################################################
class DocSearch(object):
"""A simple class that provides a method to search through class
documentation. This code is taken from mayavi-1.x's ivtk.VtkHelp
"""
    # These are class attributes to prevent regenerating them every time
# this class is instantiated.
VTK_CLASSES = []
VTK_CLASS_DOC = []
def __init__(self):
self.vtk_classes = self.VTK_CLASSES
self.vtk_c_doc = self.VTK_CLASS_DOC
if len(self.VTK_CLASSES) == 0:
self._setup_data()
def _setup_data(self):
self.vtk_classes = [x for x in dir(vtk) if x.startswith('vtk')]
n = len(self.vtk_classes)
# Store the class docs in the list given below.
self.vtk_c_doc = ['']*n
# setup the data.
for i in range(n):
c = self.vtk_classes[i]
try:
doc = getattr(vtk, c).__doc__.lower()
self.vtk_c_doc[i] = doc
except AttributeError:
pass
def search(self, word):
""" Search for word in class documentation and return matching
classes. This is also case insensitive. The searching
supports the 'and' and 'or' keywords that allow for fairly
complex searches. A space between words assumes that the two
words appear one after the other.
Parameters
----------
word -- name to search for.
"""
assert type(word) in types.StringTypes, \
"Sorry, passed argument, %s is not a string."%word
if len(word.strip()) == 0:
return []
lword = word.lower().strip()
tmp_list = lword.split()
wlist = []
prev = ""
for w in tmp_list:
z = w.strip()
if z in ('and', 'or'):
if prev and prev not in ('and', 'or'):
wlist.append(prev)
wlist.append(z)
prev = z
else:
if prev and prev not in ('and', 'or'):
prev = prev + ' ' + z
else:
prev = z
if prev in ('and', 'or'):
del wlist[-1]
elif prev:
wlist.append(prev)
ret = []
i = 0
vtk_classes = self.vtk_classes
vtk_c_doc = self.vtk_c_doc
N = len(vtk_classes)
while i < N:
stored_test = 0
do_test = ''
for w in wlist:
if w == 'and':
do_test = 'and'
elif w == 'or':
do_test = 'or'
else:
test = (vtk_c_doc[i].find(w) > -1)
if do_test == 'and':
stored_test = stored_test and test
elif do_test == 'or':
stored_test = stored_test or test
elif do_test == '':
stored_test = test
if stored_test:
ret.append(vtk_classes[i])
i = i + 1
return [get_tvtk_name(x) for x in ret]
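# Usage sketch (the query string is illustrative): DocSearch().search('delaunay and filter')
# returns the TVTK names of every VTK class whose documentation mentions both words.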
_search_help_doc = """
Help on Searching
---------------------------------------
To search for a particular TVTK class, type in the 'class_name' text entry
widget. The class names are all case sensitive. You may also select
the class from the list of available class names at the top.
As you type you will see completion options in the completions
list; the instant a complete match is found, the class documentation will
be shown at the bottom.
You can also search the TVTK class documentation for strings (case
insensitive). The search option supports the 'and' and 'or' keywords to
do advanced searches. Press <Enter>/<Return> to perform the search.
The top 25 hits will show up in the completions, to view a particular
hit either select the choice from the available ones or type in the
name in the 'class_name' entry box. To clear the search string click
the 'Clear search' button or erase the search string manually.
"""
################################################################################
# `TVTKClassChooser` class.
################################################################################
class TVTKClassChooser(HasTraits):
# The selected object, is None if no valid class_name was made.
object = Property
# The TVTK class name to choose.
class_name = Str('', desc='class name of TVTK class (case sensitive)')
# The string to search for in the class docs -- the search supports
# 'and' and 'or' keywords.
search = Str('', desc='string to search in TVTK class documentation '\
'supports the "and" and "or" keywords. '\
'press <Enter> to start search. '\
'This is case insensitive.')
clear_search = Button
# The class documentation.
doc = Str(_search_help_doc)
# Completions for the choice of class.
completions = List(Str)
# List of available class names as strings.
available = List(TVTK_CLASSES)
########################################
# Private traits.
finder = Instance(DocSearch)
n_completion = Int(25)
########################################
# View related traits.
view = View(Group(Item(name='class_name',
editor=EnumEditor(name='available')),
Item(name='class_name',
has_focus=True
),
Item(name='search',
editor=TextEditor(enter_set=True,
auto_set=False)
),
Item(name='clear_search',
show_label=False),
Item('_'),
Item(name='completions',
editor=ListEditor(columns=3),
style='readonly'
),
Item(name='doc',
resizable=True,
label='Documentation',
style='custom')
),
id='tvtk_doc',
resizable=True,
width=800,
height=600,
title='TVTK class chooser',
buttons = ["OK", "Cancel"]
)
######################################################################
# `object` interface.
######################################################################
def __init__(self, **traits):
super(TVTKClassChooser, self).__init__(**traits)
self._orig_available = list(self.available)
######################################################################
# Non-public interface.
######################################################################
def _get_object(self):
o = None
if len(self.class_name) > 0:
try:
o = getattr(tvtk, self.class_name)()
except (AttributeError, TypeError):
pass
return o
def _class_name_changed(self, value):
av = self.available
comp = [x for x in av if x.startswith(value)]
self.completions = comp[:self.n_completion]
if len(comp) == 1 and value != comp[0]:
self.class_name = comp[0]
o = self.object
if o is not None:
self.doc = get_tvtk_class_doc(o)
else:
self.doc = _search_help_doc
def _finder_default(self):
return DocSearch()
def _clear_search_fired(self):
self.search = ''
def _search_changed(self, value):
if len(value) < 3:
self.available = self._orig_available
return
f = self.finder
result = f.search(str(value))
if len(result) == 0:
self.available = self._orig_available
elif len(result) == 1:
self.class_name = result[0]
else:
self.available = result
self.completions = result[:self.n_completion]
################################################################################
# `TVTKSourceChooser` class.
################################################################################
class TVTKSourceChooser(TVTKClassChooser):
available = List(TVTK_SOURCES)
################################################################################
# `TVTKFilterChooser` class.
################################################################################
class TVTKFilterChooser(TVTKClassChooser):
available = List(TVTK_FILTERS)
################################################################################
# `TVTKSinkChooser` class.
################################################################################
class TVTKSinkChooser(TVTKClassChooser):
available = List(TVTK_SINKS)
def main():
"""Pops up a class chooser which doubles as a nice help search
documentation tool.
"""
s = TVTKClassChooser()
s.configure_traits()
if __name__ == '__main__':
main()
|
liulion/mayavi
|
tvtk/tools/tvtk_doc.py
|
Python
|
bsd-3-clause
| 13,568
|
[
"Mayavi",
"VTK"
] |
3ea29bdb332ae4e0a10979a6e8363b003673b2469f86f1c73ff2b44f4489032b
|
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.464044
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/serviceplayable.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class serviceplayable(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(serviceplayable, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def respond(self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
_orig_filter_53965751 = _filter
filterName = u'WebSafe'
if self._CHEETAH__filters.has_key("WebSafe"):
_filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
else:
_filter = self._CHEETAH__currentFilter = \
self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2serviceplayable>
\t<e2servicereference>''')
_v = VFFSL(SL,"service.servicereference",True) # u'$service.servicereference' on line 4, col 22
if _v is not None: write(_filter(_v, rawExpr=u'$service.servicereference')) # from line 4, col 22.
write(u'''</e2servicereference>
\t<e2isplayable>''')
_v = VFFSL(SL,"str",False)(VFFSL(SL,"service.isplayable",True)) # u'$str($service.isplayable)' on line 5, col 16
if _v is not None: write(_filter(_v, rawExpr=u'$str($service.isplayable)')) # from line 5, col 16.
write(u'''</e2isplayable>
</e2serviceplayable>
''')
_filter = self._CHEETAH__currentFilter = _orig_filter_53965751
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_serviceplayable= 'respond'
## END CLASS DEFINITION
if not hasattr(serviceplayable, '_initCheetahAttributes'):
templateAPIClass = getattr(serviceplayable, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(serviceplayable)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=serviceplayable()).run()
|
MOA-2011/enigma2-plugin-extensions-openwebif
|
plugin/controllers/views/web/serviceplayable.py
|
Python
|
gpl-2.0
| 5,414
|
[
"VisIt"
] |
e61ff799385c9bca00f4fd380f89093435b2f94af3a74e553cfbabdbe398e714
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
# Copyright © xh
# CreateTime: 2016-06-03 13:55:13
# this is the py version of blast
import sys
#import networkx as nx
from math import log10
import os
#from subprocess import getoutput
if sys.version_info.major == 2:
from commands import getoutput
else:
from subprocess import getoutput
from mmap import mmap, ACCESS_WRITE, ACCESS_READ
from collections import Counter
import io
#open = io.open
# print the manual
def manual_print():
print('Usage:')
# print ' python find_orth.py -i foo.sc [-c .5] [-y 50] [-n no]'
print(' python %s -i foo.sc [-c .5] [-y 50] [-n no]' % sys.argv[0])
print('Parameters:')
    print(' -i: tab-delimited file which contains 14 columns')
print(' -c: min coverage of sequence [0~1]')
print(' -y: identity [0~100]')
print(
' -n: normalization score [no|bsr|bal]. bsr: bit sore ratio; bal: bit score over anchored length. Default: no')
    print(' -a: cpu number for sorting. Default: 4')
print(' -t: keep tmpdir[y|n]. Default: n')
print(' -T: tmpdir for sort command. Default: ./tmp/')
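# Example invocation (the input file name is hypothetical; option values follow
# the help text above):
#   python find_orth.py -i all_vs_all.sc -c 0.5 -y 50 -n bsr -a 4 -T ./tmp/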
argv = sys.argv
# recommended parameters:
args = {'-i': '', '-c': .5, '-y': 0, '-n': 'no',
'-t': 'n', '-a': '4', '-T': './tmp/'}
N = len(argv)
for i in range(1, N):
k = argv[i]
if k in args:
try:
v = argv[i + 1]
except:
break
args[k] = v
elif k[:2] in args and len(k) > 2:
args[k[:2]] = k[2:]
else:
continue
if args['-i'] == '':
manual_print()
raise SystemExit()
try:
qry, coverage, identity, norm, tmpdir, cpu, tmpsrt = args[
'-i'], float(args['-c']), float(args['-y']), args['-n'], args['-t'], int(args['-a']), args['-T']
except:
manual_print()
raise SystemExit()
# make tmp dir for sort command
if tmpsrt != '/tmp/' and tmpsrt != '/tmp':
os.system('mkdir -p %s' % tmpsrt)
#qry = sys.argv[1]
qry = os.path.abspath(qry)
fn = qry.split(os.sep)[-1]
os.system('mkdir -p %s_tmp/' % qry)
os.system('ln -sf %s %s_tmp/' % (qry, qry))
qry = qry + '_tmp/' + fn
# blast parser: yields lists of blast hits that share the same query id,
# with duplicated qid-sid pairs removed
def blastparse0(f, coverage=.5, identity=0., norm='no', len_dict={}):
output = {}
#len_dict = {}
flag = None
# max bit score
mbsc = -1
for i in f:
j = i[: -1].split('\t')
qid, sid = j[:2]
qtx, stx = qid.split('|')[0], sid.split('|')[0]
key = sid
idy, aln, mis, gop, qst, qed, sst, sed, evalue, score = list(
map(float, j[2:12]))
# the fastclust seq search format
if len(j) > 13:
qln, sln = list(map(float, j[12:14]))
else:
if qid in len_dict:
qln = len_dict[qid]
else:
qln = max(qst, qed)
len_dict[qid] = qln
if sid in len_dict:
sln = len_dict[sid]
else:
sln = max(sst, sed)
len_dict[sid] = sln
qcv = (1. + abs(qed - qst)) / qln
scv = (1. + abs(sed - sst)) / sln
if qcv < coverage or scv < coverage or idy < identity:
continue
if flag != qid:
if output:
yield list(output.values())
mbsc = score
# print 'max bit score is', mbsc, qid, sid
output = {}
length = aln
flag = qid
if norm == 'bsr':
Score = score / mbsc
elif norm == 'bal':
Score = score / aln
else:
Score = score
output[key] = [qid, sid, Score]
else:
if norm == 'bsr':
Score = score / mbsc
elif norm == 'bal':
Score = score / aln
else:
Score = score
if key not in output or output[key][-1] < Score:
output[key] = [qid, sid, Score]
if output:
yield list(output.values())
# parse blast -m8 format (12 cols) or swiftOrtho -sc format (16 cols)
def blastparse(f, coverage=.5, identity=0., norm='no'):
output = {}
len_dict = {}
flag = None
# max bit score
#mbsc = -1
    mbsc_dict = {}
for i in f:
j = i[: -1].split('\t')
# if len(j) != 12 or len(j) != 16:
# continue
qid, sid = j[:2]
qtx, stx = qid.split('|')[0], sid.split('|')[0]
key = sid
try:
idy, aln, mis, gop, qst, qed, sst, sed, evalue, score = list(
map(float, j[2:12]))
except:
continue
# the fastclust seq search format
if len(j) > 13:
try:
qln, sln = list(map(float, j[12:14]))
except:
continue
else:
if qid in len_dict:
qln = len_dict[qid]
else:
qln = max(qst, qed)
len_dict[qid] = qln
qcv = (1. + abs(qed - qst)) / qln
# if qcv<coverage or scv<coverage or idy<identity:
if qcv < coverage or idy < identity:
continue
if flag != qid:
if output:
yield list(output.values())
# print 'max bit score is', mbsc, qid, sid
output = {}
length = aln
flag = qid
if norm == 'bsr':
if qid not in mbsc_dict:
mbsc_dict[qid] = score
mbsc = mbsc_dict[qid]
Score = score / mbsc
elif norm == 'bal':
Score = score / aln
else:
Score = score
output[key] = [qid, sid, Score]
else:
if norm == 'bsr':
if qid not in mbsc_dict:
mbsc_dict[qid] = score
mbsc = mbsc_dict[qid]
Score = score / mbsc
elif norm == 'bal':
Score = score / aln
else:
Score = score
if key not in output or output[key][-1] < Score:
output[key] = [qid, sid, Score]
if output:
yield list(output.values())
# distinguish IP and O
# return the IP and O
def get_IPO0(hits, l2n={}):
# get max of each species
sco_max = Counter()
out_max = 0
for hit in hits:
qid, sid, sco = hit
sco = float(sco)
qtx = qid.split('|')[0]
stx = sid.split('|')[0]
sco_max[stx] = max(sco_max[stx], sco)
if qtx != stx:
out_max = max(out_max, sco)
visit = set()
ips, ots, cos = [], [], []
for hit in hits:
qid, sid, sco = hit
if sid not in l2n:
continue
x, y = list(map(l2n.get, [qid, sid]))
sco = float(sco)
if sid in visit:
continue
else:
visit.add(sid)
qtx = qid.split('|')[0]
stx = sid.split('|')[0]
#out = [qid, sid, sco]
out = [x, y, sco]
if qtx == stx:
if sco >= out_max:
# ips.append(hit)
ips.append(out)
else:
continue
else:
if sco >= sco_max[stx]:
# ots.append(hit)
ots.append(out)
else:
cos.append(out)
ips.sort()
ots.sort()
cos.sort()
IPs = ['\t'.join(map(str, elem)) + '\n' for elem in ips]
OTs = ['\t'.join(map(str, elem)) + '\n' for elem in ots]
COs = ['\t'.join(map(str, elem)) + '\n' for elem in cos]
return IPs, OTs, COs
# get qIP, qOT and qCO
def get_qIPO(hits):
# get max of each species
sco_max = Counter()
out_max = 0
for hit in hits:
qid, sid, sco = hit
sco = float(sco)
qtx = qid.split('|')[0]
stx = sid.split('|')[0]
sco_max[stx] = max(sco_max[stx], sco)
if qtx != stx:
out_max = max(out_max, sco)
visit = set()
ips, ots, cos = [], [], []
for hit in hits:
qid, sid, sco = hit
sco = float(sco)
if sid in visit:
continue
else:
visit.add(sid)
qtx = qid.split('|')[0]
stx = sid.split('|')[0]
qid, sid = qid < sid and [qid, sid] or [sid, qid]
out = [qid, sid, sco]
out = '\t'.join([qid, sid, str(sco)]) + '\n'
if qtx == stx:
if sco >= out_max and qid != sid:
# if sco >= out_max:
ips.append(out)
outr = '\t'.join([sid, qid, str(sco)]) + '\n'
ips.append(outr)
else:
continue
else:
if sco >= sco_max[stx]:
ots.append(out)
else:
cos.append(out)
# return IPs, OTs, COs
return ips, ots, cos
# get IP and OT
def get_IPO(f):
flag = None
output = []
for i in f:
j = i[:-1].split('\t')
qid, sid, score = j
if flag != j[:2]:
if len(output) == 4:
yield output[0], output[1], sum(output[2:4]) / 2., 1
elif len(output) == 3:
yield output[0], output[1], output[2], 0
else:
# continue
pass
flag = j[:2]
output = [qid, sid, float(score)]
else:
output.append(float(score))
if len(output) == 4:
# yield output[0], output[1], sum(output[2:4]) / 2., 1
yield output[0], output[1], max(output[2:4]), 1
elif len(output) == 3:
yield output[0], output[1], output[2], 0
else:
pass
# parse and find IP, O from blast results
f = open(qry, 'r')
qip = qry + '.qIPs.txt'
_oqips = open(qip, 'w')
qot = qry + '.qOTs.txt'
_oqots = open(qot, 'w')
qco = qry + '.qCOs.txt'
_oqcos = open(qco, 'w')
# for i in blastparse(f, coverage, identity, norm, len_dict):
for i in blastparse(f, coverage, identity, norm):
IPs, OTs, COs = get_qIPO(i)
# print IPs, OTs, COs, l2n
_oqips.writelines(IPs)
_oqots.writelines(OTs)
_oqcos.writelines(COs)
_oqips.close()
_oqots.close()
_oqcos.close()
# snap a byte offset to the start of a line in s (helper used by binary_search below)
def correct(s, m, l=None, r=None, sep=b'\n'):
# sep=sep.encode()
if not l and not r:
return s.rfind(sep, 0, m) + 1
M = s.rfind(sep, l, m) + 1
if l < M < r:
return M
else:
M = s.find(sep, m, r) + 1
return M
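# binary_search below does a binary search over a sorted, newline-separated byte
# buffer (typically an mmap of one of the sorted intermediate files produced by
# this script): it returns the byte offsets delimiting the run of lines whose key
# equals p, plus those lines themselves, or (-1, -1, []) when no line matches.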
def binary_search(s, p, key=lambda x: x.split('\t', 1)[0], L=0, R=-1, sep='\n'):
#mx = chr(255)
sep = sep.encode()
if type(p) == str:
p = p.encode()
n = len(s)
#pn = len(p)
R = R == -1 and n - 1 or R
l = correct(s, L, sep=sep)
r = correct(s, R, sep=sep)
# find left
while l < r:
m = (l + r) // 2
m = correct(s, m, l, r, sep=sep)
if m == l or m == r:
break
t = s[m: s.find(sep, m)]
pat = key(t)
#print(pat, p, type(p)==str, type(p.encode()))
#print(pat, p)
if pat >= p:
r = m
else:
l = m
left = m - 1
while left >= 0:
start = s.rfind(sep, 0, left)
line = s[start + 1: left]
if key(line) == p:
left = start
else:
break
left += 1
line = s[left: s.find(sep, left)]
if key(line) != p:
return -1, -1, []
right = left
while 1:
end = s.find(sep, right)
try:
target = key(s[right: end])
except:
target = None
# if key(s[right: end]) == p:
if target == p:
right = end + 1
else:
break
pairs = s[left: right].strip().split(sep)
return left, right, pairs
###############################################################################
# get OTs
###############################################################################
#inots = [-1] * len(l2n)
inots = set()
# sort qots
qotsrt = qot + '.srt'
os.system('export LC_ALL=C && sort -T %s --parallel=%s %s -o %s && rm %s' %
(tmpsrt, cpu, qot, qotsrt, qot))
ots = qry + '.OTs.txt'
_oots = open(ots, 'w')
f = open(qotsrt, 'r')
for qid, sid, sco, lab in get_IPO(f):
if lab == 1:
out = '\t'.join([qid, sid, str(sco)]) + '\n'
_oots.write(out)
inots.add(qid)
inots.add(sid)
else:
continue
_oots.close()
f.close()
os.system('rm %s' % qotsrt)
###############################################################################
# get IPs
###############################################################################
qipsrt = qip + '.srt'
os.system('export LC_ALL=C && sort -T %s --parallel=%s %s -o %s && rm %s' %
(tmpsrt, cpu, qip, qipsrt, qip))
ipqa = {}
IPqA = {}
ips = qry + '.IPs.txt'
_oips = open(ips, 'w')
f = open(qipsrt, 'r')
for qid, sid, sco, lab in get_IPO(f):
if lab == 1:
out = '\t'.join([qid, sid, str(sco)]) + '\n'
_oips.write(out)
qtx = qid.split('|')[0]
if qid < sid:
if qid in inots or sid in inots:
try:
ipqa[qtx][0] += float(sco)
ipqa[qtx][1] += 1.
except:
ipqa[qtx] = [float(sco), 1.]
try:
IPqA[qtx][0] += float(sco)
IPqA[qtx][1] += 1.
except:
IPqA[qtx] = [float(sco), 1.]
else:
continue
_oips.close()
f.close()
os.system('rm %s' % qipsrt)
for k in IPqA:
a, b = k in ipqa and ipqa[k] or IPqA[k]
IPqA[k] = a / b
#raise SystemExit()
###############################################################################
# get COs
###############################################################################
qcosrt = qco + '.srt'
os.system('export LC_ALL=C && sort -T %s --parallel=%s %s -o %s && rm %s' %
(tmpsrt, cpu, qco, qcosrt, qco))
cos = qry + '.COs.txt'
_ocos = open(cos, 'w')
fqcosrt = open(qcosrt, 'rb')
try:
Sqco = mmap(fqcosrt.fileno(), 0, access=ACCESS_READ)
except:
Sqco = ''
fips = open(ips, 'rb')
try:
Sips = mmap(fips.fileno(), 0, access=ACCESS_READ)
except:
Sips = ''
f = open(ots, 'r')
for i in f:
if not Sips:
break
# get ot pair
#print(i, str(i).split('\t'))
qid, sid, sco = i.split('\t')[:3]
#qid, sid = map(int, [qid, sid])
# get ip of ot
# print(type(b'\t'))
st, ed, qpairs = binary_search(Sips, qid, lambda x: x.split(b'\t', 2)[0])
qips = [elem.split(b'\t')[1] for elem in qpairs]
st, ed, spairs = binary_search(Sips, sid, lambda x: x.split(b'\t', 2)[0])
sips = [elem.split(b'\t')[1] for elem in spairs]
if not qpairs and not spairs:
continue
qips.append(qid.encode())
sips.append(sid.encode())
visit = set()
for qip in qips:
for sip in sips:
if qip != qid or sip != sid:
if (qip, sip) not in visit:
visit.add((qip, sip))
else:
continue
st, ed, pairs = binary_search(
Sqco, [qip, sip], lambda x: x.split(b'\t', 3)[:2])
if pairs:
xyzs = [elem.split(b'\t') for elem in pairs]
x, y = xyzs[0][:2]
sco = max([float(elem[2]) for elem in xyzs])
#print(x.decode(), y.decode(), sco)
_ocos.write(
'\t'.join([x.decode(), y.decode(), str(sco)]) + '\n')
#_ocos.write(pairs[0]+'\n')
else:
continue
_ocos.close()
f.close()
os.system('rm %s' % qcosrt)
###############################################################################
# print normalized IPs
###############################################################################
f = open(ips, 'r')
for i in f:
# print 'all_IP\t' + i[:-1]
#x, y, score = i[:-1].split('\t')
#x, y = map(int, [x, y])
#qid, sid = n2l[x], n2l[y]
qid, sid, score = i[:-1].split('\t')
if qid >= sid:
continue
tax = qid.split('|')[0]
avg = IPqA[tax]
score = float(score)
try:
out = list(map(str, ['IP', qid, sid, score / avg]))
except:
continue
print('\t'.join(out))
f.close()
IPqA.clear()
# get co or ot from same taxon
def get_sam_tax0(f, n2l):
flag = None
out = []
visit = set()
for i in f:
x, y, sco = i[:-1].split('\t')
x, y = list(map(int, [x, y]))
if (x, y) not in visit:
visit.add((x, y))
else:
continue
qid, sid = n2l[x], n2l[y]
qtx = qid.split('|')[0]
sco = float(sco)
if qtx != flag:
if out:
yield out
flag = qtx
out = [[qid, sid, sco]]
else:
out.append([qid, sid, sco])
if out:
yield out
# get orthology relationship with same tax name
def get_sam_tax(f):
flag = None
out = []
visit = set()
for i in f:
qid, sid, sco = i[:-1].split('\t')
qtx = qid.split('|')[0]
sco = float(sco)
if qtx != flag:
if out:
yield out
flag = qtx
out = [[qid, sid, sco]]
            visit = {(qid, sid)}
else:
if (qid, sid) not in visit:
out.append([qid, sid, sco])
visit.add((qid, sid))
if out:
yield out
# normalize co or ot scores by the per-taxon average
def n_co_ot(out):
avgs = {}
for qid, sid, sco in out:
stx = sid.split('|')[0]
try:
avgs[stx][0] += sco
avgs[stx][1] += 1.
except:
avgs[stx] = [sco, 1.]
for k in avgs:
a, b = avgs[k]
avgs[k] = a / b
for qid, sid, sco in out:
stx = sid.split('|')[0]
avg = avgs[stx]
yield [qid, sid, sco / avg]
###############################################################################
# print normalized OTs and COs
###############################################################################
f = open(ots, 'r')
for i in get_sam_tax(f):
for j in n_co_ot(i):
out = '\t'.join(map(str, j))
print('OT\t' + out)
f.close()
f = open(cos, 'r')
for i in get_sam_tax(f):
for j in n_co_ot(i):
out = '\t'.join(map(str, j))
print('CO\t' + out)
f.close()
if tmpdir == 'n':
os.system('rm -rf %s_tmp/' % qry)
if tmpsrt != '/tmp/' and tmpsrt != '/tmp':
os.system('rm -rf %s' % tmpsrt)
|
Rinoahu/fastclust
|
bin/find_orth.py
|
Python
|
gpl-3.0
| 19,015
|
[
"BLAST",
"VisIt"
] |
562bda55b8b7a63f56ab2df8f20827a4054425c8911deeb13247eda1e8090ee4
|
# FermiLib plugin to interface with Psi4
#
# Copyright (C) 2017 ProjectQ-Framework (www.projectq.ch)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Define version number here and read it from setup.py automatically"""
__version__ = "0.1a1"
|
ProjectQ-Framework/FermiLib-Plugin-Psi4
|
fermilibpluginpsi4/_version.py
|
Python
|
lgpl-3.0
| 853
|
[
"Psi4"
] |
44bb89f96f9a66b2ebe1f0f4ed2431bce8c3937d10eafc751db2ad0f1af8d055
|
# Encoding utf-8
# Written for Python 3.6
import sys
import pymatgen.io.nwchem as nwchem
"""
Quick script to study the total energy change of the system during an
optimization.
"""
if len(sys.argv) > 1:
    filename = sys.argv[1]
    print('\nAnalyzing energies in ' + filename + '...\n')
else:
    raise OSError('No target output file provided.')
out = nwchem.NwOutput(filename)
energy_data = out.data[0]['energies']
for i in range(len(energy_data)):
print('Step ' + str(i + 1) + ': Energy = ' + str(energy_data[i]))
|
mbercx/cage
|
cage/scripts/energyAnalysis.py
|
Python
|
mit
| 513
|
[
"NWChem",
"pymatgen"
] |
9f8bf2b620cebe96f61cda88dbc6e3f64691db49934325290466efb4d3c196b8
|
from django import forms
from edc_constants.constants import YES, NO, NOT_APPLICABLE
from .base_infant_model_form import BaseInfantModelForm
from ..models import InfantFeeding
class InfantFeedingForm(BaseInfantModelForm):
def clean(self):
cleaned_data = super(InfantFeedingForm, self).clean()
self.validate_other_feeding()
self.validate_took_formula()
self.validate_took_formula_not_yes()
self.validate_cows_milk()
self.validate_took_other_milk()
self.validate_breast_milk_weaning()
self.validate_formula_intro_occur(cleaned_data)
return cleaned_data
def validate_other_feeding(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('formula_intro_occur') == YES:
if not cleaned_data.get('formula_intro_date'):
raise forms.ValidationError('Question3: If received formula milk | foods | liquids since last'
' attended visit. Please provide intro date')
else:
if cleaned_data.get('formula_intro_date'):
raise forms.ValidationError('You mentioned no formula milk | foods | liquids received'
' since last visit in question 3. DO NOT PROVIDE DATE')
def validate_took_formula(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('took_formula') == YES:
if not cleaned_data.get('is_first_formula'):
raise forms.ValidationError(
'Question7: Infant took formula, is this the first reporting of infant formula use?'
' Please provide YES or NO')
if cleaned_data.get('is_first_formula') == YES:
if not cleaned_data.get('date_first_formula'):
raise forms.ValidationError('If this is a first reporting of infant formula'
' please provide date and if date is estimated')
if not cleaned_data.get('est_date_first_formula'):
raise forms.ValidationError('If this is a first reporting of infant formula'
' please provide date and if date is estimated')
if cleaned_data.get('is_first_formula') == NO:
if cleaned_data.get('date_first_formula'):
raise forms.ValidationError('Question8: You mentioned that is not the first reporting of infant'
' formula PLEASE DO NOT PROVIDE DATE')
if cleaned_data.get('est_date_first_formula'):
raise forms.ValidationError('Question9: You mentioned that is not the first reporting of infant'
' formula PLEASE DO NOT PROVIDE EST DATE')
def validate_took_formula_not_yes(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('took_formula') != YES:
if cleaned_data.get('is_first_formula'):
raise forms.ValidationError('Question7: You mentioned that infant did not take formula,'
' PLEASE DO NOT PROVIDE FIRST FORMULA USE INFO')
if cleaned_data.get('date_first_formula'):
raise forms.ValidationError('Question8: You mentioned that infant did not take formula,'
' PLEASE DO NOT PROVIDE DATE OF FIRST FORMULA USE')
if cleaned_data.get('est_date_first_formula'):
raise forms.ValidationError('Question9: You mentioned that infant did not take formula,'
' PLEASE DO NOT PROVIDE ESTIMATED DATE OF FIRST FORMULA USE')
def validate_cows_milk(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('cow_milk') == YES:
if cleaned_data.get('cow_milk_yes') == 'N/A':
raise forms.ValidationError('Question13: If infant took cows milk. Answer CANNOT be Not Applicable')
else:
if not cleaned_data.get('cow_milk_yes') == 'N/A':
raise forms.ValidationError('Question13: Infant did not take cows milk. Answer is NOT APPLICABLE')
def validate_took_other_milk(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('other_milk') == YES:
if not cleaned_data.get('other_milk_animal'):
raise forms.ValidationError('Question15: The infant took milk from another animal, please specify'
' which?')
if cleaned_data.get('milk_boiled') == NOT_APPLICABLE:
raise forms.ValidationError('Question16:The infant took milk from another animal, answer'
' cannot be N/A')
else:
if cleaned_data.get('other_milk_animal'):
raise forms.ValidationError('Question15: The infant did not take milk from any other animal, please'
' do not provide the name of the animal')
if cleaned_data.get('milk_boiled') != NOT_APPLICABLE:
raise forms.ValidationError('Question16: The infant did not take milk from any other animal, the'
' answer for whether the milk was boiled should be N/A')
def validate_breast_milk_weaning(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('ever_breastfeed') == YES:
if cleaned_data.get('complete_weaning') != NOT_APPLICABLE:
                raise forms.ValidationError('Question24: The infant has been breastfed since the last visit, The'
                                            ' answer should be N/A')
else:
if cleaned_data.get('complete_weaning') == NOT_APPLICABLE:
raise forms.ValidationError('Question24: The infant has not been breastfed since the last visit, '
'The answer should not be N/A')
def validate_formula_intro_occur(self, cleaned_data):
if cleaned_data.get('formula_intro_occur') == YES:
if cleaned_data.get('formula_intro_date'):
answer = False
for question in ['juice', 'cow_milk', 'other_milk', 'fruits_veg',
'cereal_porridge', 'solid_liquid']:
if cleaned_data.get(question) == YES:
answer = True
break
if not answer:
raise forms.ValidationError(
'You should answer YES on either one of the questions about the juice, cow_milk, other milk, '
'fruits_veg, cereal_porridge or solid_liquid')
class Meta:
model = InfantFeeding
fields = '__all__'
|
TshepangRas/tshilo-dikotla
|
td_infant/forms/infant_feeding_form.py
|
Python
|
gpl-2.0
| 6,922
|
[
"VisIt"
] |
77429c13b7252c9eefacb4a214bf9b69d11e08a90e7590670e44f2a14d088dac
|
#!/usr/bin/env python3
import sys
import hashlib
import re
import math
from itertools import count, islice, starmap
import itertools
import wave
import struct
import argparse
# Identitone
# Definitions:
# tone: collection of sounds that make an identifying audio bite i.e. an identitone
# sound: collection of notes, is a homogenous repeating waveform
# note: a single pitch, chosen from the used_notes list.
# tonedef: a tone definition, consisting of snddefs
# snddef: a sound definition, consisting of a list of notes
# First makes a tonedefgen, which is an infinite generator of snddefgens
# A snddefgen is an infinite generator that yields notes using the seeder
# Second it makes a tonegen, which is an infinite generator of sndgens
# A sndgen is an infinite generator of amplitudes made from the combined notes for that sound
# Then it uses the tonegen to make an actual tone, which is a list of numbers indicating the amplitude
# Have a power of two (32) for the number of notes for perfectionism's sake
# It will take 5 bits to identify a random note from this list
used_notes = ["E4", "F4", "F#4", "G4", "G#4", "A4", "A#4", "B4", "C5", "C#5", "D5", "D#5", "E5", "F5", "F#5", "G5", "G#5", "A5", "A#5", "B5", "C6", "C#6", "D6", "D#6", "E6", "F6", "F#6", "G6", "G#6", "A6", "A#6", "B6"]
notes = {"C0": 16.35, "C#0": 17.32, "D0": 18.35, "D#0": 19.45, "E0": 20.60, "F0": 21.83, "F#0": 23.12, "G0": 24.50, "G#0": 25.96, "A0": 27.50, "A#0": 29.14, "B0": 30.87,
"C1":32.70 , "C#1": 34.65, "D1": 36.71, "D#1": 38.89, "E1": 41.20, "F1": 43.65, "F#1": 46.25, "G1": 49.00, "G#1": 51.91, "A1": 55.00, "A#1": 58.27, "B1": 61.74,
"C2": 65.41, "C#2": 69.30, "D2": 73.42, "D#2": 77.78, "E2": 82.41, "F2": 87.31, "F#2": 92.50, "G2": 98.00, "G#2": 103.83, "A2": 110.00, "A#2": 116.54, "B2": 123.47,
"C3": 130.81, "C#3": 138.59, "D3": 146.83, "D#3": 155.56, "E3": 164.81, "F3": 174.61, "F#3": 185.00, "G3": 196.00, "G#3": 207.65, "A3": 220.00, "A#3": 233.08, "B3": 246.94,
"C4": 261.63, "C#4": 277.18, "D4": 293.66, "D#4": 311.13, "E4": 329.63, "F4": 349.23, "F#4": 369.99, "G4": 392.00, "G#4": 415.30, "A4": 440.00, "A#4": 466.16, "B4": 493.88,
# Anything below 400 doesn't play very well on my Galaxy S3 speaker
"C5": 523.25, "C#5": 554.37, "D5": 587.33, "D#5": 622.25, "E5": 659.25, "F5": 698.46, "F#5": 739.99, "G5": 783.99, "G#5": 830.61, "A5": 880.00, "A#5": 932.33, "B5": 987.77,
"C6": 1046.50, "C#6": 1108.73, "D6": 1174.66, "D#6": 1244.51, "E6": 1318.51, "F6": 1396.91, "F#6": 1479.98, "G6": 1567.98, "G#6": 1661.22, "A6": 1760.00, "A#6": 1864.66, "B6": 1975.53,
# The high notes can be piercing and unpleasant
"C7": 2093.00, "C#7": 2217.46, "D7": 2349.32, "D#7": 2489.02, "E7": 2637.02, "F7": 2793.83, "F#7": 2959.96, "G7": 3135.96, "G#7": 3322.44, "A7": 3520.00, "A#7": 3729.31, "B7": 3951.07,
"C8": 4186.01, "C#8": 4434.92, "D8": 4698.63, "D#8": 4978.03, "E8": 5274.04, "F8": 5587.65, "F#8": 5919.91, "G8": 6271.93, "G#8": 6644.88, "A8": 7040.00, "A#8": 7458.62, "B8": 7902.13}
email_regex = re.compile(r"\A[\w+\-.]+@([a-z\d\-]+\.)+[a-z]+\Z")
phone_regex = re.compile(r"\A(1[-_ ]?)?([0-9][0-9][0-9][-_ ]?)?([0-9][0-9][0-9][-_ ]?)([0-9][0-9][0-9][0-9])\Z")
phone_strip_list = ['-', '_', ' ']
lookup_dict = {}
default_amplitude=0.9
def sine_wave(freq=440.00, rate=44100, amp=0.9, harmonic=True):
if amp > 1.0:
amp = 1.0
if amp < 0.0:
amp = 0.0
per = int(rate/freq)
    if rate == 44100 and amp == 0.9 and freq in lookup_dict:
return lookup_dict[freq]
else:
interval = [float(amp) * math.sin(2.0 * math.pi * float(freq) * (float(i % per)/float(rate)))
for i in range(per)]
if harmonic:
            interval = [(val + (float(amp / 2) * math.sin(2.0 * math.pi * float(freq * 2) * (float(idx % int(per / 2)) / float(rate))))) / 1.5
                        for idx, val in enumerate(interval)]
return (interval[i%per] for i in count())
lookup_dict = {notes[note]: sine_wave(notes[note]) for note in used_notes}
#tones = {name: sine_wave(freq=val) for name, val in notes.items()}
# Filters an identifying string into a proper seed string
def seed_from_value(seedstr):
filtered = None
lower = seedstr.strip().lower()
if phone_regex.match(lower):
numeric = lower
for char in phone_strip_list:
numeric = numeric.replace(char, "")
filtered = numeric.lstrip('1')
elif email_regex.match(lower):
filtered = lower
else:
filtered = lower
return filtered
def hash_seed(seed):
return hashlib.sha512(seed.encode("ascii")).hexdigest()
# Takes a hash and gives an infinite generator for making pseudorandom binary based on that hash
def make_seeder(hashdigest):
seed = hashdigest
for i in count():
binseed = bin(int(seed, base=16))[2:]
for char in binseed:
yield char
seed = hash_seed(seed)
# Generates a random int between nmin (inclusive) and nmax (exclusive)
def generate_int(seeder, nmax, nmin=0):
bstr = [next(seeder) for i in range(math.ceil(math.log(nmax - nmin, 2)))]
return int(''.join(bstr), base=2)
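# For instance, with the 32-entry used_notes list above, generate_int(seeder, 32)
# pulls ceil(log2(32)) = 5 pseudorandom bits from the seeder and returns an index
# in the range 0..31, which is the "5 bits" per note mentioned in the header comments.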
# Makes an infinite generator that yields notes from the seeder
def make_snddefgen(seeder):
return (used_notes[generate_int(seeder, len(used_notes))] for i in count())
# Gives an infinite generator that yields snddefgens (sound definition generators)
def make_tonedefgen(seeder):
return (make_snddefgen(seeder) for i in count())
def make_sndgen(snddefgen, numnotes, rate, harmonic):
notestrs = [next(snddefgen) for i in range(numnotes)]
#print(notestrs)
notenums = [notes[i] for i in notestrs]
notegens = map(lambda x: sine_wave(x, rate, default_amplitude, harmonic), notenums)
# Makes a generator of tuples of the zipped notegens, then maps that into the average of that tuple
return map(lambda x: sum(x) / numnotes, zip(*notegens))
# Makes an infinite generator that yields sndgens (sound generators)
def make_tonegen(tonedefgen, numnotes, rate, harmonic):
return (make_sndgen(snddefgen, numnotes, rate, harmonic) for snddefgen in tonedefgen)
# Makes an infinite generator that yields samples of the tone
def make_tone(tonegen, numsamples):
for sndgen in tonegen:
for sample in range(numsamples):
yield next(sndgen)
# Takes a single channel sound and makes it stereo
def duplicate_channels(sound):
return map(lambda x: (x, x), sound)
sha512regex = re.compile(r'^[0-9abcdef]{128}$')
def write_identitone(seed_hash, iofile, seconds, numnotes, sounds, rate, harmonic=True):
sampwidth = 2
nchannels = 2
if not sha512regex.match(seed_hash):
return None
seeder = make_seeder(seed_hash)
tonedefgen = make_tonedefgen(seeder)
tonegen = make_tonegen(tonedefgen, numnotes, rate, harmonic)
nframes = int(rate * seconds)
samples_per_sound = int(rate * (seconds / sounds))
tone = make_tone(tonegen, samples_per_sound)
stereotone = islice(duplicate_channels(tone), nframes)
max_amp = float(int((2 ** (sampwidth * 8)) / 2) - 1)
w = wave.open(iofile, 'w')
w.setparams((nchannels, sampwidth, rate, nframes, 'NONE', 'not compressed'))
frames = b''.join(b''.join(struct.pack('h', int(max_amp * sample)) for sample in channels) for channels in stereotone)
w.writeframesraw(frames)
w.close()
def make_identitone(identifier, filename="identitone.wav", seconds=6, numnotes=4, sounds=4, rate=44100, harmonic=True):
sampwidth = 2
nchannels = 2
seed = seed_from_value(identifier)
print("Seed value after filtering: " + seed)
hashdigest = hash_seed(seed)
print("Hash for seed is: " + hashdigest)
seeder = make_seeder(hashdigest)
tonedefgen = make_tonedefgen(seeder)
tonegen = make_tonegen(tonedefgen, numnotes, rate, harmonic)
nframes = int(rate * seconds)
samples_per_sound = int(rate * (seconds / sounds))
tone = make_tone(tonegen, samples_per_sound)
stereotone = islice(duplicate_channels(tone), nframes)
max_amp = float(int((2 ** (sampwidth * 8)) / 2) - 1)
w = wave.open(filename, 'w')
w.setparams((nchannels, sampwidth, rate, nframes, 'NONE', 'not compressed'))
frames = b''.join(b''.join(struct.pack('h', int(max_amp * sample)) for sample in channels) for channels in stereotone)
w.writeframesraw(frames)
w.close()
return tone
def main():
time=2
sounds=16
notes=1
rate=44100
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--time', help="Duration of identitone, default=" + str(time), default=time, type=float)
parser.add_argument('-s', '--sounds', help="Number of distinct parts in an identitone, default=" + str(sounds), default=sounds, type=int)
parser.add_argument('-n', '--notes', help="Number of notes in each part of the identitone, default=" + str(notes), default=notes, type=int)
parser.add_argument('-r', '--rate', help="Sample rate in Hz, default=" + str(rate), default=rate, type=int)
parser.add_argument('-H', '--no-harmonic', help="Do not add a second harmonic of 1/2 amplitude to notes", dest='harmonic', default=True, const=False, action='store_const')
parser.add_argument('seed', help="Seed string for creating the hash", type=str)
parser.add_argument('filename', help="File to generate", type=str)
args = parser.parse_args()
make_identitone(args.seed, args.filename, args.time, args.notes, args.sounds, args.rate, args.harmonic)
if __name__ == "__main__":
main()
|
brainwater/identitone
|
identitone.py
|
Python
|
gpl-2.0
| 9,561
|
[
"Galaxy"
] |
3e6bd45e9370313aef0b50d85665739bade37ccb065d88cc2e1f49950f65897d
|
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TensorFlow Federated is an open-source federated learning framework.
TensorFlow Federated (TFF) is an open-source framework for machine learning and
other computations on decentralized data. TFF has been developed to facilitate
open research and experimentation with Federated Learning (FL), an approach to
machine learning where a shared global model is trained across many
participating clients that keep their training data locally. For example, FL has
been used to train prediction models for mobile keyboards without uploading
sensitive typing data to servers.
TFF enables developers to use the included federated learning algorithms with
their models and data, as well as to experiment with novel algorithms. The
building blocks provided by TFF can also be used to implement non-learning
computations, such as aggregated analytics over decentralized data.
TFF's interfaces are organized in two layers:
* Federated Learning (FL) API
The `tff.learning` layer offers a set of high-level interfaces that allow
developers to apply the included implementations of federated training and
evaluation to their existing TensorFlow models.
* Federated Core (FC) API
At the core of the system is a set of lower-level interfaces for concisely
expressing novel federated algorithms by combining TensorFlow with distributed
communication operators within a strongly-typed functional programming
environment. This layer also serves as the foundation upon which we've built
`tff.learning`.
TFF enables developers to declaratively express federated computations, so they
could be deployed to diverse runtime environments. Included with TFF is a
single-machine simulation runtime for experiments. Please visit the
tutorials and try it out yourself!
"""
# TODO(b/124800187): Keep in sync with the contents of README.
import datetime
import sys
import setuptools
DOCLINES = __doc__.split('\n')
REQUIRED_PACKAGES = [
'absl-py~=1.0.0',
'attrs~=21.2.0',
'cachetools~=3.1.1',
'dm-tree~=0.1.1',
'farmhashpy~=0.4.0',
'grpcio~=1.34.0',
'jax~=0.2.27',
'jaxlib~=0.1.76',
'numpy~=1.21.4',
'portpicker~=1.3.1',
'semantic-version~=2.8.5',
'tensorflow-model-optimization~=0.7.1',
'tensorflow-privacy~=0.8.0',
'tensorflow~=2.8.0',
'tqdm~=4.28.1',
'kubernetes~=21.7.0',
]
with open('tensorflow_federated/version.py') as fp:
globals_dict = {}
exec(fp.read(), globals_dict) # pylint: disable=exec-used
VERSION = globals_dict['__version__']
def get_package_name(requirement: str) -> str:
allowed_operators = ['~=', '<', '>', '==', '<=', '>=', '!=']
separator = allowed_operators[0]
for operator in allowed_operators[1:]:
requirement = requirement.replace(operator, separator)
name, _ = requirement.split(separator, maxsplit=1)
return name
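# For example, get_package_name('tensorflow~=2.8.0') returns 'tensorflow' and
# get_package_name('grpcio~=1.34.0') returns 'grpcio'.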
if '--nightly' in sys.argv:
sys.argv.remove('--nightly')
PROJECT_NAME = 'tensorflow_federated_nightly'
date = datetime.date.today().strftime('%Y%m%d')
VERSION = '{}.dev{}'.format(VERSION, date)
for index, required_package in enumerate(REQUIRED_PACKAGES):
package_name = get_package_name(required_package)
if package_name == 'grpcio':
REQUIRED_PACKAGES[index] = 'grpcio~=1.37.0'
elif package_name == 'tensorflow':
REQUIRED_PACKAGES[index] = 'tf-nightly'
else:
PROJECT_NAME = 'tensorflow_federated'
setuptools.setup(
name=PROJECT_NAME,
version=VERSION,
    packages=setuptools.find_packages(exclude=('tools',)),
description=DOCLINES[0],
long_description='\n'.join(DOCLINES[2:]),
long_description_content_type='text/plain',
author='Google Inc.',
author_email='packages@tensorflow.org',
url='http://tensorflow.org/federated',
download_url='https://github.com/tensorflow/federated/tags',
install_requires=REQUIRED_PACKAGES,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow federated machine learning',
)
|
tensorflow/federated
|
tensorflow_federated/tools/python_package/setup.py
|
Python
|
apache-2.0
| 5,411
|
[
"VisIt"
] |
ebb043d3b5d8be98ad6dbc601d881bfea02ef0e26452d5d555240f401b6e6dbf
|
from rllab.envs.base import Env
from rllab.envs.base import Step
from rllab.spaces import Box
import numpy as np
class MultiMod2DEnv(Env):
"""
This is a single time-step MDP where the action taken corresponds to the next state (in a 2D plane).
The reward has a multi-modal gaussian shape, with the mode means set in a circle around the origin.
"""
def __init__(self, mu=(1, 0), sigma=0.01, n=2, rand_init=False):
self.mu = np.array(mu)
        self.sigma = sigma  # we suppose symmetric Gaussians
self.n = n
self.rand_init = rand_init
@property
def observation_space(self):
return Box(low=-np.inf, high=np.inf, shape=(2,))
@property
def action_space(self):
        return Box(low=-5.0 * np.linalg.norm(self.mu), high=5.0 * np.linalg.norm(self.mu), shape=(2,))
def reset(self):
self._state = np.zeros(shape=(2,)) \
+ int(self.rand_init) * (
(np.random.rand(2, ) - 0.5) * 5 * np.linalg.norm(self.mu) ) ##mu is taken as largest
observation = np.copy(self._state)
return observation
def reward_state(self, state):
x = state
mu = self.mu
A = np.array([[np.cos(2. * np.pi / self.n), -np.sin(2. * np.pi / self.n)],
[np.sin(2. * np.pi / self.n), np.cos(2. * np.pi / self.n)]]) ##rotation matrix
reward = -0.5 + 1. / (2 * np.sqrt(np.power(2. * np.pi, 2.) * self.sigma)) * (
np.exp(-0.5 / self.sigma * np.linalg.norm(x - mu) ** 2))
for i in range(1, self.n):
mu = np.dot(A, mu)
reward += 1. / (2 * np.sqrt(np.power(2. * np.pi, 2.) * self.sigma)) * (
np.exp(-0.5 / self.sigma * np.linalg.norm(x - mu) ** 2))
return reward
def step(self, action):
self._state += action
done = True
next_observation = np.copy(self._state)
reward = self.reward_state(self._state)
return Step(observation=next_observation, reward=reward, done=done)
def render(self):
print('current state:', self._state)
def log_diagnostics(self, paths):
# to count the modes I need the current policy!
pass
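# Minimal usage sketch (not part of the original module; the values below are
# illustrative): a single reset/step cycle, where the action is the displacement
# applied to the state and the episode terminates immediately.
if __name__ == "__main__":
    env = MultiMod2DEnv(mu=(1, 0), sigma=0.01, n=4)
    obs = env.reset()
    print('initial state:', obs)
    print('step result:', env.step(np.array([1.0, 0.0])))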
|
florensacc/snn4hrl
|
envs/point/multiMod2D_env.py
|
Python
|
mit
| 2,207
|
[
"Gaussian"
] |
73b5b7876a82d578e3b102c6cd85009d9d37ce23981a1d3b60d3ff6ebc272901
|
# Copyright 2013 Devsim LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#set_parameter -name threads_available -value 1
#set_parameter -name threads_task_size -value 1024
import gmsh_mos2d_create
from devsim import *
from devsim.python_packages.simple_physics import *
from devsim.python_packages.ramp import *
from devsim.python_packages.Klaassen import *
from devsim.python_packages.mos_physics import *
# TODO: write out mesh, and then read back in as separate test
device = "mos2d"
silicon_regions=("gate", "bulk")
oxide_regions=("oxide",)
regions = ("gate", "bulk", "oxide")
interfaces = ("bulk_oxide", "gate_oxide")
for i in regions:
CreateSolution(device, i, "Potential")
for i in silicon_regions:
SetSiliconParameters(device, i, 300)
CreateSiliconPotentialOnly(device, i)
for i in oxide_regions:
SetOxideParameters(device, i, 300)
CreateOxidePotentialOnly(device, i, "log_damp")
### Set up contacts
contacts = get_contact_list(device=device)
for i in contacts:
tmp = get_region_list(device=device, contact=i)
r = tmp[0]
print("%s %s" % (r, i))
CreateSiliconPotentialOnlyContact(device, r, i)
set_parameter(device=device, name=GetContactBiasName(i), value=0.0)
for i in interfaces:
CreateSiliconOxideInterface(device, i)
#for d in get_device_list():
# for gn in get_parameter_list():
# print("{0} {1}").format(gn, get_parameter(device=d, name=gn))
# for gn in get_parameter_list(device=d):
# print("{0} {1} {2}").format(d, gn, get_parameter(device=d, name=gn))
# for r in get_region_list(device=d):
# for gn in get_parameter_list(device=d, region=r):
# print("{0} {1} {2} {3}").format(d, r, gn, get_parameter(device=d, region=r, name=gn))
#write_devices(file="foo.msh", type="devsim")
solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)
solve(type="dc", absolute_error=1.0e-13, relative_error=1e-12, maximum_iterations=30)
#
##write_devices -file gmsh_mos2d_potentialonly.flps -type floops
write_devices(file="gmsh_mos2d_potentialonly", type="vtk")
for i in silicon_regions:
CreateSolution(device, i, "Electrons")
CreateSolution(device, i, "Holes")
set_node_values(device=device, region=i, name="Electrons", init_from="IntrinsicElectrons")
set_node_values(device=device, region=i, name="Holes", init_from="IntrinsicHoles")
Set_Mobility_Parameters(device, i)
Klaassen_Mobility(device, i)
#use bulk Klaassen mobility
CreateSiliconDriftDiffusion(device, i, "mu_bulk_e", "mu_bulk_h")
for c in contacts:
tmp = get_region_list(device=device, contact=c)
r = tmp[0]
CreateSiliconDriftDiffusionAtContact(device, r, c)
for r in silicon_regions:
node_model(device=device, region=r, name="logElectrons", equation="log(Electrons)/log(10)")
CreateNormalElectricFieldFromCurrentFlow(device, r, "ElectronCurrent")
CreateNormalElectricFieldFromCurrentFlow(device, r, "HoleCurrent")
Philips_Surface_Mobility(device, r, "Enormal_ElectronCurrent", "Enormal_HoleCurrent")
#Philips_VelocitySaturation $device $region mu_vsat_e mu_bulk_e Eparallel_ElectronCurrent vsat_e
Philips_VelocitySaturation(device, r, "mu_vsat_e", "mu_e_0", "Eparallel_ElectronCurrent", "vsat_e")
CreateElementModel2d(device, r, "mu_ratio", "mu_vsat_e/mu_bulk_e")
CreateElementModel2d(device, r, "mu_surf_ratio", "mu_e_0/mu_bulk_e")
CreateElementModel2d(device, r, "epar_ratio", "abs(Eparallel_ElectronCurrent/ElectricField_mag)")
#createElementElectronCurrent2d $device $region ElementElectronCurrent mu_n
#createElementElectronCurrent2d $device $region ElementElectronCurrent mu_bulk_e
CreateElementElectronCurrent2d(device, r, "ElementElectronCurrent", "mu_vsat_e")
# element_from_edge_model -edge_model ElectricField -device $device -region $i
CreateElementModel2d(device, r, "magElementElectronCurrent", "log(abs(ElementElectronCurrent)+1e-10)/log(10)")
vector_element_model(device=device, region=r, element_model="ElementElectronCurrent")
# we aren't going to worry about holes for now.
#createNormalElectricFieldFromCurrentFlow $device $region HoleCurrent
CreateElementElectronContinuityEquation(device, r, "ElementElectronCurrent")
for contact in ("body", "drain", "source"):
CreateElementContactElectronContinuityEquation(device, contact, "ElementElectronCurrent")
#write_devices(file="debug.msh", type="devsim")
solve(type="dc", absolute_error=1.0e30, relative_error=1e-10, maximum_iterations=100)
write_devices(file="gmsh_mos2d_dd_kla_zero.dat", type="tecplot")
write_devices(file="gmsh_mos2d_dd_kla_zero", type="vtk")
drainbias=get_parameter(device=device, name=GetContactBiasName("drain"))
gatebias=get_parameter(device=device, name=GetContactBiasName("gate"))
rampbias(device, "gate", 0.5, 0.5, 0.001, 100, 1e-8, 1e30, printAllCurrents)
rampbias(device, "drain", 0.5, 0.1, 0.001, 100, 1e-8, 1e30, printAllCurrents)
write_devices(file="gmsh_mos2d_dd_kla.dat", type="tecplot")
write_devices(file="gmsh_mos2d_dd_kla", type="vtk")
|
devsim/devsim
|
examples/mobility/gmsh_mos2d_kla.py
|
Python
|
apache-2.0
| 5,542
|
[
"VTK"
] |
c841a41d86ecac390fe99d49e7bdcea7feecc9ec2f2fad511c1c9becf8544c30
|
import sys
import numpy
import pychemia
if pychemia.HAS_MAYAVI:
    from mayavi.mlab import quiver3d
if not pychemia.HAS_MAYAVI:
sys.exit(1)
def test_quiver3d():
x, y, z = numpy.mgrid[-2:3, -2:3, -2:3]
r = numpy.sqrt(x ** 2 + y ** 2 + z ** 4)
u = y * numpy.sin(r) / (r + 0.001)
v = -x * numpy.sin(r) / (r + 0.001)
w = numpy.zeros_like(z)
obj = quiver3d(x, y, z, u, v, w, line_width=3, scale_factor=1)
return obj
|
MaterialsDiscovery/PyChemia
|
tests/test_code_vasp_03.py
|
Python
|
mit
| 486
|
[
"Mayavi"
] |
8cdc0ac81f0ce9d1c4095686691ee4e51fe4959e6c57ccb201648d43c644d24a
|
#!/usr/bin/python
# Copyright 2003-2010 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import print_function
__author__ = "Thomas de Grenier de Latour (tgl), " + \
"modular re-write by: Brian Dolbec (dol-sen)"
__email__ = "degrenier@easyconnect.fr, " + \
"brian.dolbec@gmail.com"
__version__ = "git"
__productname__ = "eclean"
__description__ = "A cleaning tool for Gentoo distfiles and binaries."
import os
import sys
import re
import time
import getopt
import portage
from portage.output import white, yellow, turquoise, green, teal, red
import gentoolkit.pprinter as pp
from gentoolkit.eclean.search import (DistfilesSearch,
findPackages, port_settings, pkgdir)
from gentoolkit.eclean.exclude import (parseExcludeFile,
ParseExcludeFileException)
from gentoolkit.eclean.clean import CleanUp
from gentoolkit.eclean.output import OutputControl
#from gentoolkit.eclean.dbapi import Dbapi
from gentoolkit.eprefix import EPREFIX
def printVersion():
"""Output the version info."""
print( "%s (%s) - %s" \
% (__productname__, __version__, __description__))
print()
print("Author: %s <%s>" % (__author__,__email__))
print("Copyright 2003-2009 Gentoo Foundation")
print("Distributed under the terms of the GNU General Public License v2")
def printUsage(_error=None, help=None):
"""Print help message. May also print partial help to stderr if an
error from {'options','actions'} is specified."""
out = sys.stdout
if _error:
out = sys.stderr
if not _error in ('actions', 'global-options', \
'packages-options', 'distfiles-options', \
'merged-packages-options', 'merged-distfiles-options', \
'time', 'size'):
_error = None
if not _error and not help: help = 'all'
if _error == 'time':
print( pp.error("Wrong time specification"), file=out)
print( "Time specification should be an integer followed by a"+
" single letter unit.", file=out)
print( "Available units are: y (years), m (months), w (weeks), "+
"d (days) and h (hours).", file=out)
print( "For instance: \"1y\" is \"one year\", \"2w\" is \"two"+
" weeks\", etc. ", file=out)
return
if _error == 'size':
print( pp.error("Wrong size specification"), file=out)
print( "Size specification should be an integer followed by a"+
" single letter unit.", file=out)
print( "Available units are: G, M, K and B.", file=out)
print("For instance: \"10M\" is \"ten megabytes\", \"200K\" "+
"is \"two hundreds kilobytes\", etc.", file=out)
return
if _error in ('global-options', 'packages-options', 'distfiles-options', \
'merged-packages-options', 'merged-distfiles-options',):
print( pp.error("Wrong option on command line."), file=out)
print( file=out)
elif _error == 'actions':
print( pp.error("Wrong or missing action name on command line."), file=out)
print( file=out)
print( white("Usage:"), file=out)
if _error in ('actions','global-options', 'packages-options', \
'distfiles-options') or help == 'all':
print( " "+turquoise(__productname__),
yellow("[global-option] ..."),
green("<action>"),
yellow("[action-option] ..."), file=out)
if _error == 'merged-distfiles-options' or help in ('all','distfiles'):
print( " "+turquoise(__productname__+'-dist'),
yellow("[global-option, distfiles-option] ..."), file=out)
if _error == 'merged-packages-options' or help in ('all','packages'):
print( " "+turquoise(__productname__+'-pkg'),
yellow("[global-option, packages-option] ..."), file=out)
if _error in ('global-options', 'actions'):
print( " "+turquoise(__productname__),
yellow("[--help, --version]"), file=out)
if help == 'all':
print( " "+turquoise(__productname__+"(-dist,-pkg)"),
yellow("[--help, --version]"), file=out)
if _error == 'merged-packages-options' or help == 'packages':
print( " "+turquoise(__productname__+'-pkg'),
yellow("[--help, --version]"), file=out)
if _error == 'merged-distfiles-options' or help == 'distfiles':
print( " "+turquoise(__productname__+'-dist'),
yellow("[--help, --version]"), file=out)
print(file=out)
if _error in ('global-options', 'merged-packages-options', \
'merged-distfiles-options') or help:
print( "Available global", yellow("options")+":", file=out)
print( yellow(" -C, --nocolor")+
" - turn off colors on output", file=out)
print( yellow(" -d, --deep")+
" - only keep the minimum for a reinstallation", file=out)
print( yellow(" -e, --exclude-file=<path>")+
" - path to the exclusion file", file=out)
print( yellow(" -i, --interactive")+
" - ask confirmation before deletions", file=out)
print( yellow(" -n, --package-names")+
" - protect all versions (when --deep", file=out)
print( yellow(" -p, --pretend")+
" - only display what would be cleaned", file=out)
print( yellow(" -q, --quiet")+
" - be as quiet as possible", file=out)
print( yellow(" -t, --time-limit=<time>")+
" - don't delete files modified since "+yellow("<time>"), file=out)
print( " "+yellow("<time>"), "is a duration: \"1y\" is"+
" \"one year\", \"2w\" is \"two weeks\", etc. ", file=out)
print( " "+"Units are: y (years), m (months), w (weeks), "+
"d (days) and h (hours).", file=out)
print( yellow(" -h, --help")+ \
" - display the help screen", file=out)
print( yellow(" -V, --version")+
" - display version info", file=out)
print( file=out)
if _error == 'actions' or help == 'all':
print( "Available", green("actions")+":", file=out)
print( green(" packages")+
" - clean outdated binary packages from PKGDIR", file=out)
print( green(" distfiles")+
" - clean outdated packages sources files from DISTDIR", file=out)
print( file=out)
if _error in ('packages-options','merged-packages-options') \
or help in ('all','packages'):
print( "Available", yellow("options"),"for the",
green("packages"),"action:", file=out)
print( yellow(" NONE :)"), file=out)
print( file=out)
if _error in ('distfiles-options', 'merged-distfiles-options') \
or help in ('all','distfiles'):
print("Available", yellow("options"),"for the",
green("distfiles"),"action:", file=out)
print( yellow(" -f, --fetch-restricted")+
" - protect fetch-restricted files (when --deep)", file=out)
print( yellow(" -s, --size-limit=<size>")+
" - don't delete distfiles bigger than "+yellow("<size>"), file=out)
print( " "+yellow("<size>"), "is a size specification: "+
"\"10M\" is \"ten megabytes\", \"200K\" is", file=out)
print( " "+"\"two hundreds kilobytes\", etc. Units are: "+
"G, M, K and B.", file=out)
print( file=out)
print( "More detailed instruction can be found in",
turquoise("`man %s`" % __productname__), file=out)
class ParseArgsException(Exception):
"""For parseArgs() -> main() communications."""
def __init__(self, value):
        self.value = value
def __str__(self):
return repr(self.value)
def parseSize(size):
"""Convert a file size "Xu" ("X" is an integer, and "u" in
[G,M,K,B]) into an integer (file size in Bytes).
@raise ParseArgsException: in case of failure
"""
units = {
'G': (1024**3),
'M': (1024**2),
'K': 1024,
'B': 1
}
try:
match = re.match(r"^(?P<value>\d+)(?P<unit>[GMKBgmkb])?$",size)
size = int(match.group('value'))
if match.group('unit'):
size *= units[match.group('unit').capitalize()]
except:
raise ParseArgsException('size')
return size
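# Hedged usage sketch (illustrative only, not part of the original module):
#   parseSize("10M")  == 10 * 1024**2 == 10485760
#   parseSize("200K") == 200 * 1024   == 204800
#   parseSize("42")   == 42            # no unit means plain bytes
#   parseSize("huge") raises ParseArgsException('size')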
def parseTime(timespec):
"""Convert a duration "Xu" ("X" is an int, and "u" a time unit in
[Y,M,W,D,H]) into an integer which is a past EPOCH date.
Raises ParseArgsException('time') in case of failure.
(yep, big approximations inside... who cares?).
"""
units = {'H' : (60 * 60)}
units['D'] = units['H'] * 24
units['W'] = units['D'] * 7
units['M'] = units['D'] * 30
units['Y'] = units['D'] * 365
try:
# parse the time specification
match = re.match(r"^(?P<value>\d+)(?P<unit>[YMWDHymwdh])?$",timespec)
value = int(match.group('value'))
if not match.group('unit'): unit = 'D'
else: unit = match.group('unit').capitalize()
except:
raise ParseArgsException('time')
return time.time() - (value * units[unit])
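# Hedged usage sketch (illustrative only, not part of the original module):
#   parseTime("2w") == time.time() - 2 * 7 * 24 * 60 * 60   # two weeks ago
#   parseTime("3")  is interpreted as "3d", since days are the default unit
#   parseTime("x1") raises ParseArgsException('time')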
def parseArgs(options={}):
"""Parse the command line arguments. Raise exceptions on
    errors or non-action modes (help/version). Returns an action, and affects
the options dict.
"""
def optionSwitch(option,opts,action=None):
"""local function for interpreting command line options
and setting options accordingly"""
return_code = True
do_help = False
for o, a in opts:
if o in ("-h", "--help"):
do_help = True
elif o in ("-V", "--version"):
raise ParseArgsException('version')
elif o in ("-C", "--nocolor"):
options['nocolor'] = True
pp.output.nocolor()
elif o in ("-d", "--deep", "--destructive"):
options['destructive'] = True
elif o in ("-D", "--deprecated"):
options['deprecated'] = True
elif o in ("-i", "--interactive") and not options['pretend']:
options['interactive'] = True
elif o in ("-p", "--pretend"):
options['pretend'] = True
options['interactive'] = False
elif o in ("-q", "--quiet"):
options['quiet'] = True
options['verbose'] = False
elif o in ("-t", "--time-limit"):
options['time-limit'] = parseTime(a)
elif o in ("-e", "--exclude-file"):
print("cli --exclude option")
options['exclude-file'] = a
elif o in ("-n", "--package-names"):
options['package-names'] = True
elif o in ("-f", "--fetch-restricted"):
options['fetch-restricted'] = True
elif o in ("-s", "--size-limit"):
options['size-limit'] = parseSize(a)
elif o in ("-v", "--verbose") and not options['quiet']:
options['verbose'] = True
else:
return_code = False
# sanity check of --deep only options:
for opt in ('fetch-restricted', 'package-names'):
if (not options['destructive']) and options[opt]:
if not options['quiet']:
print( pp.error(
"--%s only makes sense in --deep mode." % opt), file=sys.stderr)
options[opt] = False
if do_help:
if action:
raise ParseArgsException('help-'+action)
else:
raise ParseArgsException('help')
return return_code
# here are the different allowed command line options (getopt args)
getopt_options = {'short':{}, 'long':{}}
getopt_options['short']['global'] = "CdDipqe:t:nhVv"
getopt_options['long']['global'] = ["nocolor", "deep", "destructive",
"deprecated", "interactive", "pretend", "quiet", "exclude-file=",
"time-limit=", "package-names", "help", "version", "verbose"]
getopt_options['short']['distfiles'] = "fs:"
getopt_options['long']['distfiles'] = ["fetch-restricted", "size-limit="]
getopt_options['short']['packages'] = ""
getopt_options['long']['packages'] = [""]
# set default options, except 'nocolor', which is set in main()
options['interactive'] = False
options['pretend'] = False
options['quiet'] = False
options['accept_all'] = False
options['destructive'] = False
options['deprecated'] = False
options['time-limit'] = 0
options['package-names'] = False
options['fetch-restricted'] = False
options['size-limit'] = 0
options['verbose'] = False
    # if called by a well-named symlink, set the action accordingly:
action = None
# temp print line to ensure it is the svn/branch code running, etc..
#print( "###### svn/branch/gentoolkit_eclean ####### ==> ", os.path.basename(sys.argv[0]))
if os.path.basename(sys.argv[0]).startswith(__productname__+'-pkg') or \
os.path.basename(sys.argv[0]).startswith(__productname__+'-packages'):
action = 'packages'
elif os.path.basename(sys.argv[0]).startswith(__productname__+'-dist') or \
        os.path.basename(sys.argv[0]).startswith(__productname__+'-distfiles'):
action = 'distfiles'
# prepare for the first getopt
if action:
short_opts = getopt_options['short']['global'] \
+ getopt_options['short'][action]
long_opts = getopt_options['long']['global'] \
+ getopt_options['long'][action]
opts_mode = 'merged-'+action
else:
short_opts = getopt_options['short']['global']
long_opts = getopt_options['long']['global']
opts_mode = 'global'
# apply getopts to command line, show partial help on failure
try:
opts, args = getopt.getopt(sys.argv[1:], short_opts, long_opts)
except:
raise ParseArgsException(opts_mode+'-options')
# set options accordingly
optionSwitch(options,opts,action=action)
# if action was already set, there should be no more args
if action and len(args):
raise ParseArgsException(opts_mode+'-options')
# if action was set, there is nothing left to do
if action:
return action
# So, we are in "eclean --foo action --bar" mode. Parse remaining args...
# Only two actions are allowed: 'packages' and 'distfiles'.
if not len(args) or not args[0] in ('packages','distfiles'):
raise ParseArgsException('actions')
action = args.pop(0)
# parse the action specific options
try:
opts, args = getopt.getopt(args, \
getopt_options['short'][action], \
getopt_options['long'][action])
except:
raise ParseArgsException(action+'-options')
# set options again, for action-specific options
optionSwitch(options,opts,action=action)
    # any remaining args? Then die!
if len(args):
raise ParseArgsException(action+'-options')
# returns the action. Options dictionary is modified by side-effect.
return action
def doAction(action,options,exclude={}, output=None):
"""doAction: execute one action, ie display a few message, call the right
find* function, and then call doCleanup with its result."""
# define vocabulary for the output
if action == 'packages':
files_type = "binary packages"
else:
files_type = "distfiles"
saved = {}
deprecated = {}
# find files to delete, depending on the action
if not options['quiet']:
output.einfo("Building file list for "+action+" cleaning...")
if action == 'packages':
clean_me = findPackages(
options,
exclude=exclude,
destructive=options['destructive'],
package_names=options['package-names'],
time_limit=options['time-limit'],
pkgdir=pkgdir,
#port_dbapi=Dbapi(portage.db[portage.root]["porttree"].dbapi),
#var_dbapi=Dbapi(portage.db[portage.root]["vartree"].dbapi),
)
else:
# accept defaults
engine = DistfilesSearch(output=options['verbose-output'],
#portdb=Dbapi(portage.db[portage.root]["porttree"].dbapi),
#var_dbapi=Dbapi(portage.db[portage.root]["vartree"].dbapi),
)
clean_me, saved, deprecated = engine.findDistfiles(
exclude=exclude,
destructive=options['destructive'],
fetch_restricted=options['fetch-restricted'],
package_names=options['package-names'],
time_limit=options['time-limit'],
size_limit=options['size-limit'],
deprecate = options['deprecated']
)
# initialize our cleaner
cleaner = CleanUp( output.progress_controller)
# actually clean files if something was found
if clean_me:
# verbose pretend message
if options['pretend'] and not options['quiet']:
output.einfo("Here are the "+files_type+" that would be deleted:")
# verbose non-pretend message
elif not options['quiet']:
output.einfo("Cleaning " + files_type +"...")
# do the cleanup, and get size of deleted files
if options['pretend']:
clean_size = cleaner.pretend_clean(clean_me)
elif action in ['distfiles']:
clean_size = cleaner.clean_dist(clean_me)
elif action in ['packages']:
clean_size = cleaner.clean_pkgs(clean_me,
pkgdir)
# vocabulary for final message
if options['pretend']:
verb = "would be"
else:
verb = "were"
# display freed space
if not options['quiet']:
output.total('normal', clean_size, len(clean_me), verb, action)
# nothing was found
elif not options['quiet']:
output.einfo("Your "+action+" directory was already clean.")
if saved and not options['quiet']:
print()
print( (pp.emph(" The following ") + yellow("unavailable") +
pp.emph(" files were saved from cleaning due to exclusion file entries")))
output.set_colors('deprecated')
clean_size = cleaner.pretend_clean(saved)
output.total('deprecated', clean_size, len(saved), verb, action)
if deprecated and not options['quiet']:
print()
print( (pp.emph(" The following ") + yellow("unavailable") +
pp.emph(" installed packages were found")))
output.set_colors('deprecated')
output.list_pkgs(deprecated)
def main():
"""Parse command line and execute all actions."""
# set default options
options = {}
options['nocolor'] = (port_settings["NOCOLOR"] in ('yes','true')
or not sys.stdout.isatty())
if options['nocolor']:
pp.output.nocolor()
# parse command line options and actions
try:
action = parseArgs(options)
# filter exception to know what message to display
except ParseArgsException as e:
if e.value == 'help':
printUsage(help='all')
sys.exit(0)
elif e.value[:5] == 'help-':
printUsage(help=e.value[5:])
sys.exit(0)
elif e.value == 'version':
printVersion()
sys.exit(0)
else:
printUsage(e.value)
sys.exit(2)
output = OutputControl(options)
options['verbose-output'] = lambda x: None
if not options['quiet']:
if options['verbose']:
options['verbose-output'] = output.einfo
# parse the exclusion file
if not 'exclude-file' in options:
# set it to the default exclude file if it exists
exclude_file = "%s/etc/%s/%s.exclude" % (EPREFIX,__productname__ , action)
if os.path.isfile(exclude_file):
options['exclude-file'] = exclude_file
if 'exclude-file' in options:
try:
exclude = parseExcludeFile(options['exclude-file'],
options['verbose-output'])
except ParseExcludeFileException as e:
print( pp.error(str(e)), file=sys.stderr)
print( pp.error(
"Invalid exclusion file: %s" % options['exclude-file']), file=sys.stderr)
print( pp.error(
"See format of this file in `man %s`" % __productname__), file=sys.stderr)
sys.exit(1)
else:
exclude = {}
# security check for non-pretend mode
if not options['pretend'] and portage.secpass == 0:
print( pp.error(
"Permission denied: you must be root or belong to " +
"the portage group."), file=sys.stderr)
sys.exit(1)
# execute action
doAction(action, options, exclude=exclude,
output=output)
if __name__ == "__main__":
"""actually call main() if launched as a script"""
try:
main()
except KeyboardInterrupt:
print( "Aborted.")
sys.exit(130)
sys.exit(0)
|
dol-sen/gentoolkit
|
pym/gentoolkit/eclean/cli.py
|
Python
|
gpl-2.0
| 18,377
|
[
"Brian"
] |
3e097e164ca84423ab3413c796bb237b26d996a9f5098c71ed0343dce0a56d39
|
"""Classes to perform GridSearch on the custom kernels defined in
:mod:`kcat.kernels.functions`.
Their interface is very similar to scikit-learn's
`GridSearchCV <http://scikit-learn.org/stable/modules/generated/sklearn\
.grid_search.GridSearchCV.html#sklearn.grid_search.GridSearchCV>`_,
and the same parameters should be used.
"""
from sklearn.grid_search import GridSearchCV
from . import functions as kf
from ..utils import pgen
class BaseSearch:
"""The default `GridSearchCV` in scikit-learn searches all possible
combinations of parameters. With some kernels this is not necessary
as some combinations of parameters do not make sense
    (e.g. prev='f1' with post='f1').
BaseSearch is a class that can be extended to search an arbitrary
parameter space, instead of all the possible ones. This is done
by implementing the function `fit`. Any subclass should deal with
kernel-specific keyword arguments such as *alpha*, *gamma*, *prev*,
etc. Common arguments like *estimator*, *cv*, *C* and so on can be
handled by `GridSearchCV`.
"""
kernel_function = None
def __init__(self, estimator, cv, **kwargs):
self.gskwargs = {
'estimator': estimator,
'cv': cv,
'param_grid': kwargs,
'n_jobs': 4,
}
self.best_score_ = 0
self.best_params_ = {}
self.best_kparams_ = {}
self.best_estimator_ = None
self.X = None
def fit(self, X, y):
"""Fit the model to the data matrix *X* and class vector *y*.
Args:
X: Numpy matrix with the examples in rows.
y: Numpy array with the class of each example.
"""
self.X = X
G = self.kernel(X, X)
search = GridSearchCV(**self.gskwargs)
search.fit(G, y)
self.best_estimator_ = search.best_estimator_
self.best_params_ = search.best_params_
self.best_score_ = search.best_score_
if search.best_score_ >= self.best_score_:
self.best_params_ = search.best_params_
self.best_score_ = search.best_score_
self.best_estimator_ = search.best_estimator_
def predict(self, X):
"""
Args:
X: Numpy matrix with the examples in rows.
Returns:
A Numpy vector with the predicted classes.
"""
if self.X is None:
raise ValueError("Model is not fitted.")
G = self.kernel(X, self.X)
return self.best_estimator_.predict(G)
@classmethod
def kernel(cls, *args, **kwargs):
"""Calls the kernel function associated with the current class."""
if cls.kernel_function is None:
return args[0]
else:
return cls.kernel_function(*args, **kwargs)
@property
def details(self):
"""A dictionary with the found parameters and error."""
details = {
'train_score': self.best_score_,
'best_parameters': {},
}
details['best_parameters'].update(self.best_params_)
details['best_parameters'].update(self.best_kparams_)
return details
class ELKSearch(BaseSearch):
"""Finds the best parameters for :meth:`kcat.kernels.functions.elk`."""
kernel_function = kf.elk
class K0Search(BaseSearch):
"""Finds the best parameters for :meth:`kcat.kernels.functions.k0`.
Args:
functions: A list with tuples of the form ('prev', 'post').
gamma: A list of floats with the gamma values.
"""
kernel_function = kf.k0
def __init__(self, functions, gamma, **kwargs):
self.functions = functions
self.gamma = gamma
super().__init__(**kwargs)
def fit(self, X, y):
self.X = X
for prev, post in self.functions:
uses_gammas = prev == 'f1' or post in ('f1', 'f2')
for g in self.gamma if uses_gammas else [None]:
search = GridSearchCV(**self.gskwargs)
params = dict(prev=prev, post=post, gamma=g)
gram = self.kernel(X, X, **params)
search.fit(gram, y)
if search.best_score_ >= self.best_score_:
self.best_score_ = search.best_score_
self.best_params_ = search.best_params_
self.best_kparams_ = params
self.best_estimator_ = search.best_estimator_
def predict(self, X):
Y = self.X
gram = self.kernel(X, Y, **self.best_kparams_)
return self.best_estimator_.predict(gram)
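# Illustrative usage sketch (not part of the original module). The estimator,
# cross-validation setting, parameter grids and data below are assumptions;
# the ('f1', 'f2') pair simply reuses function names mentioned in this module
# and may not be the full set supported by kcat.kernels.functions:
#
#     from sklearn.svm import SVC
#     search = K0Search(estimator=SVC(kernel='precomputed'), cv=5,
#                       functions=[('f1', 'f2')], gamma=[0.1, 1.0], C=[1, 10])
#     search.fit(X_train, y_train)     # X_train holds categorical examples
#     y_pred = search.predict(X_test)
#     print(search.details)            # best parameters and training score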
class K1Search(BaseSearch):
"""Finds the best parameters for :meth:`kcat.kernels.functions.k1`.
Args:
alpha: A list of floats.
functions: A list with tuples of the form ('prev', 'post').
gamma: A list of float values.
"""
kernel_function = kf.k1
def __init__(self, alpha, functions, gamma, **kwargs):
super().__init__(**kwargs)
self.alpha = alpha
self.functions = functions
self.gamma = gamma
self.pgen = None
def fit(self, X, y):
self.X = X
self.pgen = pgen(X)
self.Xp = Xp = self.pgen(X)
for prev, post in self.functions:
uses_gammas = prev == 'f1' or post in ('f1', 'f2')
for g in self.gamma if uses_gammas else [None]:
for a in self.alpha:
search = GridSearchCV(**self.gskwargs)
params = dict(alpha=a, prev=prev, post=post, gamma=g)
gram = self.kernel(X, X, Xp, Xp, **params)
search.fit(gram, y)
if search.best_score_ >= self.best_score_:
self.best_score_ = search.best_score_
self.best_params_ = search.best_params_
self.best_kparams_ = params
self.best_estimator_ = search.best_estimator_
def predict(self, X):
Xp = self.pgen(X)
gram = self.kernel(X, self.X, Xp, self.Xp, **self.best_kparams_)
return self.best_estimator_.predict(gram)
class K2Search(BaseSearch):
"""Finds the best parameters for :meth:`kcat.kernels.functions.k2`.
Args:
functions: A list with tuples of the form ('prev', 'post').
gamma: A list of float values.
"""
kernel_function = kf.k2
def __init__(self, functions, gamma, **kwargs):
super().__init__(**kwargs)
self.functions = functions
self.gamma = gamma
self.pgen = None
def fit(self, X, y):
self.X = X
self.pgen = pgen(X)
self.Xp = Xp = self.pgen(X)
for prev, post in self.functions:
uses_gammas = prev == 'f1' or post in ('f1', 'f2')
for g in self.gamma if uses_gammas else [None]:
search = GridSearchCV(**self.gskwargs)
params = dict(prev=prev, post=post, gamma=g)
gram = self.kernel(X, X, Xp, Xp, **params)
search.fit(gram, y)
if search.best_score_ >= self.best_score_:
self.best_score_ = search.best_score_
self.best_params_ = search.best_params_
self.best_kparams_ = params
self.best_estimator_ = search.best_estimator_
def predict(self, X):
Xp = self.pgen(X)
gram = self.kernel(X, self.X, Xp, self.Xp, **self.best_kparams_)
return self.best_estimator_.predict(gram)
class M3Search(K1Search):
"""Finds the best parameters for :meth:`kcat.kernels.functions.m3`.
Args:
alpha: A list of floats.
functions: A list with tuples of the form ('prev', 'post').
gamma: A list of float values.
"""
kernel_function = kf.m3
class M4Search(K1Search):
kernel_function = kf.m4
class M5Search(K1Search):
kernel_function = kf.m5
class M6Search(K1Search):
kernel_function = kf.m6
class M7Search(K1Search):
kernel_function = kf.m7
class M8Search(K1Search):
kernel_function = kf.m8
class M9Search(K1Search):
kernel_function = kf.m9
class MASearch(K1Search):
kernel_function = kf.mA
class MBSearch(K1Search):
kernel_function = kf.mB
class MCSearch(K1Search):
kernel_function = kf.mC
class MDSearch(K1Search):
kernel_function = kf.mD
class MESearch(K1Search):
kernel_function = kf.mE
class RBFSearch(BaseSearch):
pass
class CHI1Search(BaseSearch):
kernel_function = kf.chi1
class CHI2Search(BaseSearch):
kernel_function = kf.chi2
|
Alkxzv/categorical-kernels
|
kcat/kernels/search.py
|
Python
|
mit
| 8,564
|
[
"Elk"
] |
0ce1bf026f1f97a1adf136384bf97ed23e865a2e801e932d80db349ce22bb2b6
|
"""
Example of plotting a 3D vector field
There are still problems here with getting the angles exactly right....
"""
# set up some data to plot
from Numeric import *
dim = 10
# initialise the positions of the vectors
x = zeros((dim,dim), typecode=Float)
y = zeros((dim,dim), typecode=Float)
z = zeros((dim,dim), typecode=Float)
# initialise the vector displacements
# (I may need to rethink how this works in the interface)
dx = zeros((dim,dim), typecode=Float)
dy = zeros((dim,dim), typecode=Float)
dz = zeros((dim,dim), typecode=Float)
# set the positions randomly, and set the displacements to some smaller
# random number but of mean zero instead of distributed between 0 and 1
import random
random.seed()
for i in range(dim):
for j in range(dim):
x[i,j] = random.random()
y[i,j] = random.random()
z[i,j] = random.random()
dx[i,j] = (random.random()-0.5)/5.0
dy[i,j] = (random.random()-0.5)/5.0
dz[i,j] = (random.random()-0.5)/5.0
#### original povray code
import vtk
import os, sys, re, math
from Numeric import *
# read in the file
reader = vtk.vtkXMLUnstructuredGridReader()
reader.SetFileName("../../vel-0500.vtk")
reader.Update()
# get the grid
grid = reader.GetOutput()
# grab the model centre and bounds
centre = grid.GetCenter()
bounds = grid.GetBounds()
# try and extract the vector norm
norm = vtk.vtkVectorNorm()
norm.SetInput(grid)
maxNorm = grid.GetPointData().GetVectors().GetMaxNorm()
### extract the relevant grid data
# the points
points = grid.GetPoints()
numPoints = points.GetNumberOfPoints()
x = zeros(numPoints, typecode=Float)
y = zeros(numPoints, typecode=Float)
z = zeros(numPoints, typecode=Float)
for i in range(numPoints):
x[i], y[i], z[i] = points.GetPoint(i)
# the data at the points
data = grid.GetPointData().GetVectors()
vx = zeros(numPoints, typecode=Float)
vy = zeros(numPoints, typecode=Float)
vz = zeros(numPoints, typecode=Float)
vNorm = zeros(numPoints, typecode=Float)
for i in range(numPoints):
vx[i], vy[i], vz[i] = data.GetTuple3(i)
vNorm[i] = math.sqrt(vx[i]*vx[i] + vy[i]*vy[i] + vz[i]*vz[i])
# make a lookup table for the colour map and invert it (colours look
# better when it's inverted)
lut = vtk.vtkLookupTable()
refLut = vtk.vtkLookupTable()
lut.Build()
refLut.Build()
for j in range(256):
lut.SetTableValue(j, refLut.GetTableValue(255-j))
# get the colours
r = zeros(numPoints, typecode=Float)
g = zeros(numPoints, typecode=Float)
b = zeros(numPoints, typecode=Float)
for i in range(numPoints):
r[i], g[i], b[i] = lut.GetColor(vNorm[i]/maxNorm)
### generate the pov file
pov = open("arrowPlot3D.pov", "w")
pov.write("#include \"colors.inc\"\n")
pov.write("#include \"shapes.inc\"\n")
pov.write("#include \"textures.inc\"\n")
pov.write("camera {\n")
pov.write(" location <%f, %f, -2.5>\n" % (centre[0], centre[1]))
pov.write(" look_at <%f, %f, -%f>\n" % (centre[0], centre[1], centre[2]))
pov.write("}\n")
pov.write("light_source {\n")
pov.write(" <0, 0, -3>\n")
pov.write(" colour White\n")
pov.write("}\n")
pov.write("#declare Arrow = union {\n")
pov.write(" cone {\n")
pov.write(" <0, 0, 0>, 0.3\n")
pov.write(" <1, 0, 0>, 0.0\n")
pov.write(" }\n")
pov.write(" cylinder {\n")
pov.write(" <-1, 0, 0>\n")
pov.write(" <0, 0, 0>,\n")
pov.write(" 0.15\n")
pov.write(" }\n")
pov.write("}\n")
for i in range(numPoints):
pov.write("object {\n")
scale = 0.05*vNorm[i]/maxNorm
if scale < 1e-8:
scale = 1e-7
pov.write(" Arrow scale %g " % scale)
pov.write("rotate <%f, %f, %f> " % (vx[i], vy[i], vz[i]))
pov.write("translate <%f, %f, -%f> " % (x[i], y[i], z[i]))
pov.write("pigment { colour <%f, %f, %f> }\n" % (r[i], g[i], b[i]))
pov.write("}\n")
pov.close()
### generate the ini file
# open the ini file to write to
ini = open("arrowPlot3D.ini", "w")
# the output resolution
ini.write("Width=640\n")
ini.write("Height=480\n")
# anti-aliasing settings
ini.write("Antialias=on\n")
# generate png files
ini.write("Output_File_Type=N\n")
# the name of the input pov file
ini.write("Input_File_Name=arrowPlot3D.pov\n")
# pause when done
ini.write("Pause_When_Done=on\n")
# close the file
ini.close()
# run povray on the file
result = os.system("povray arrowPlot3D.ini")
if result != 0:
raise SystemError, "Povray execution failed"
else:
# clean up a bit
os.unlink("arrowPlot3D.pov")
os.unlink("arrowPlot3D.ini")
# vim: expandtab shiftwidth=4:
|
paultcochrane/pyvisi
|
examples/renderers/povray/arrowPlot3D.py
|
Python
|
gpl-2.0
| 4,475
|
[
"VTK"
] |
f295a761d13661a6177e8c2e126d9c98026014469b60b3d44cefc28397041b33
|
#!/usr/bin/python3
import gi
gi.require_version('Cvc', '1.0')
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Cvc, GdkPixbuf, Gio
from SettingsWidgets import SidePage, GSettingsSoundFileChooser
from xapp.GSettingsWidgets import *
CINNAMON_SOUNDS = "org.cinnamon.sounds"
CINNAMON_DESKTOP_SOUNDS = "org.cinnamon.desktop.sound"
MAXIMUM_VOLUME_KEY = "maximum-volume"
DECAY_STEP = .15
EFFECT_LIST = [
{"label": _("Starting Cinnamon"), "schema": CINNAMON_SOUNDS, "file": "login-file", "enabled": "login-enabled"},
{"label": _("Leaving Cinnamon"), "schema": CINNAMON_SOUNDS, "file": "logout-file", "enabled": "logout-enabled"},
{"label": _("Switching workspace"), "schema": CINNAMON_SOUNDS, "file": "switch-file", "enabled": "switch-enabled"},
{"label": _("Opening new windows"), "schema": CINNAMON_SOUNDS, "file": "map-file", "enabled": "map-enabled"},
{"label": _("Closing windows"), "schema": CINNAMON_SOUNDS, "file": "close-file", "enabled": "close-enabled"},
{"label": _("Minimizing windows"), "schema": CINNAMON_SOUNDS, "file": "minimize-file", "enabled": "minimize-enabled"},
{"label": _("Maximizing windows"), "schema": CINNAMON_SOUNDS, "file": "maximize-file", "enabled": "maximize-enabled"},
{"label": _("Unmaximizing windows"), "schema": CINNAMON_SOUNDS, "file": "unmaximize-file", "enabled": "unmaximize-enabled"},
{"label": _("Tiling and snapping windows"), "schema": CINNAMON_SOUNDS, "file": "tile-file", "enabled": "tile-enabled"},
{"label": _("Inserting a device"), "schema": CINNAMON_SOUNDS, "file": "plug-file", "enabled": "plug-enabled"},
{"label": _("Removing a device"), "schema": CINNAMON_SOUNDS, "file": "unplug-file", "enabled": "unplug-enabled"},
{"label": _("Showing notifications"), "schema": CINNAMON_SOUNDS, "file": "notification-file", "enabled": "notification-enabled"},
{"label": _("Changing the sound volume"), "schema": CINNAMON_DESKTOP_SOUNDS, "file": "volume-sound-file", "enabled": "volume-sound-enabled"}
]
SOUND_TEST_MAP = [
# name, position, icon name, row, col, pa id
[_("Front Left"), "front-left", "audio-speaker-left", 0, 0, 1],
[_("Front Right"), "front-right", "audio-speaker-right", 0, 2, 2],
[_("Front Center"), "front-center", "audio-speaker-center", 0, 1, 3],
[_("Rear Left"), "rear-left", "audio-speaker-left-back", 2, 0, 5],
[_("Rear Right"), "rear-right", "audio-speaker-right-back", 2, 2, 6],
[_("Rear Center"), "rear-center", "audio-speaker-center-back", 2, 1, 4],
[_("Subwoofer"), "lfe", "audio-subwoofer", 1, 1, 7],
[_("Side Left"), "side-left", "audio-speaker-left-side", 1, 0, 10],
[_("Side Right"), "side-right", "audio-speaker-right-side", 1, 2, 11]
]
def list_header_func(row, before, user_data):
if before and not row.get_header():
row.set_header(Gtk.Separator(orientation=Gtk.Orientation.HORIZONTAL))
class SoundBox(Gtk.Box):
def __init__(self, title):
Gtk.Box.__init__(self)
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_spacing(5)
label = Gtk.Label()
label.set_markup("<b>%s</b>" % title)
label.set_xalign(0.0)
self.add(label)
frame = Gtk.Frame()
frame.set_shadow_type(Gtk.ShadowType.IN)
frame_style = frame.get_style_context()
frame_style.add_class("view")
self.pack_start(frame, True, True, 0)
main_box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
frame.add(main_box)
scw = Gtk.ScrolledWindow()
scw.expand = True
        scw.set_min_content_height(450)
scw.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
scw.set_shadow_type(Gtk.ShadowType.NONE)
main_box.pack_start(scw, True, True, 0)
self.box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
scw.add(self.box)
self.list_box = Gtk.ListBox()
self.list_box.set_selection_mode(Gtk.SelectionMode.NONE)
self.list_box.set_header_func(list_header_func, None)
self.box.add(self.list_box)
def add_row(self, row):
self.list_box.add(row)
class Slider(SettingsWidget):
def __init__(self, title, minLabel, maxLabel, minValue, maxValue, sizeGroup, step=None, page=None, value=0, gicon=None, iconName=None):
super(Slider, self).__init__()
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_spacing(5)
self.set_margin_bottom(5)
if sizeGroup == None:
sizeGroup = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
if step == None:
step = (maxValue - minValue) / 100
if page == None:
page = (maxValue - minValue) / 10
self.adjustment = Gtk.Adjustment.new(value, minValue, maxValue, step, page, 0)
topBox = Gtk.Box()
self.leftBox = Gtk.Box()
self.rightBox = Gtk.Box()
topGroup = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
topGroup.add_widget(self.leftBox)
topGroup.add_widget(self.rightBox)
# add label and icon (if specified)
labelBox = Gtk.Box(spacing=5)
if gicon != None:
appIcon = Gtk.Image.new_from_gicon(gicon, 2)
labelBox.pack_start(appIcon, False, False, 0)
elif iconName != None:
appIcon = Gtk.Image.new_from_icon_name(iconName, 2)
labelBox.pack_start(appIcon, False, False, 0)
self.label = Gtk.Label(title)
labelBox.pack_start(self.label, False, False, 0)
labelBox.set_halign(Gtk.Align.CENTER)
topBox.pack_start(self.leftBox, False, False, 0)
topBox.pack_start(labelBox, True, True, 0)
topBox.pack_start(self.rightBox, False, False, 0)
# add scale
sliderBox = Gtk.Box()
self.slider = Gtk.Scale.new(Gtk.Orientation.HORIZONTAL, self.adjustment)
self.slider.props.draw_value = False
        min_label = Gtk.Label()
max_label = Gtk.Label()
min_label.set_alignment(1.0, 0.75)
max_label.set_alignment(0.0, 0.75)
min_label.set_margin_right(6)
max_label.set_margin_left(6)
min_label.set_markup("<i><small>%s</small></i>" % minLabel)
max_label.set_markup("<i><small>%s</small></i>" % maxLabel)
sizeGroup.add_widget(min_label)
sizeGroup.add_widget(max_label)
sliderBox.pack_start(min_label, False, False, 0)
sliderBox.pack_start(self.slider, True, True, 0)
sliderBox.pack_start(max_label, False, False, 0)
self.pack_start(topBox, False, False, 0)
self.pack_start(sliderBox, False, False, 0)
self.show_all()
def setMark(self, val):
self.slider.add_mark(val, Gtk.PositionType.TOP, "")
class VolumeBar(Slider):
def __init__(self, normVolume, maxPercent, title=_("Volume: "), gicon=None, sizeGroup=None):
self.normVolume = normVolume
self.volume = 0
self.isMuted = False
self.baseTitle = title
self.stream = None
self.mutedHandlerId = 0
self.volumeHandlerId = 0
super(VolumeBar, self).__init__(title, _("Softer"), _("Louder"), 0, maxPercent, sizeGroup, 1, 5, 0, gicon)
self.set_spacing(0)
self.set_border_width(2)
self.set_margin_left(23)
self.set_margin_right(23)
self.slider.set_sensitive(False)
self.muteImage = Gtk.Image.new_from_icon_name("audio-volume-muted-symbolic", 1)
self.muteSwitch = Gtk.ToggleButton()
self.muteSwitch.set_image(self.muteImage)
self.muteSwitch.set_relief(Gtk.ReliefStyle.NONE)
self.muteSwitch.set_active(False)
self.muteSwitch.set_sensitive(False)
self.leftBox.pack_start(self.muteSwitch, False, False, 0)
if maxPercent > 100:
self.setMark(100)
self.muteSwitchHandlerId = self.muteSwitch.connect("clicked", self.toggleMute)
self.adjustmentHandlerId = self.adjustment.connect("value-changed", self.onVolumeChanged)
def connectStream(self):
self.mutedHandlerId = self.stream.connect("notify::is-muted", self.setVolume)
self.volumeHandlerId = self.stream.connect("notify::volume", self.setVolume)
self.setVolume(None, None)
def disconnectStream(self):
if self.mutedHandlerId > 0:
self.stream.disconnect(self.mutedHandlerId)
self.mutedHandlerId = 0
if self.volumeHandlerId > 0:
self.stream.disconnect(self.volumeHandlerId)
self.volumeHandlerId = 0
def setStream(self, stream):
if self.stream and stream != self.stream:
self.disconnectStream()
self.stream = stream
self.connectStream()
self.slider.set_sensitive(True)
self.muteSwitch.set_sensitive(True)
def setVolume(self, a, b):
if self.stream.get_is_muted():
newVolume = 0
self.isMuted = True
else:
newVolume = int(round(self.stream.props.volume / self.normVolume * 100))
self.isMuted = False
self.volume = newVolume
self.adjustment.handler_block(self.adjustmentHandlerId)
self.adjustment.set_value(newVolume)
self.adjustment.handler_unblock(self.adjustmentHandlerId)
self.updateStatus()
def onVolumeChanged(self, adjustment):
newVolume = int(round(self.adjustment.get_value()))
muted = newVolume == 0
self.volume = newVolume
self.stream.handler_block(self.volumeHandlerId)
self.stream.set_volume(newVolume * self.normVolume / 100)
self.stream.push_volume()
self.stream.handler_unblock(self.volumeHandlerId)
if self.stream.get_is_muted() != muted:
self.setMuted(muted)
self.updateStatus()
def setMuted(self, muted):
self.isMuted = muted
self.stream.change_is_muted(muted)
def toggleMute(self, a=None):
self.setMuted(not self.isMuted)
def updateStatus(self):
self.muteSwitch.handler_block(self.muteSwitchHandlerId)
self.muteSwitch.set_active(self.isMuted)
self.muteSwitch.handler_unblock(self.muteSwitchHandlerId)
if self.isMuted:
self.muteImage.set_from_icon_name("audio-volume-muted-symbolic", 1)
self.label.set_label(self.baseTitle + _("Muted"))
self.muteSwitch.set_tooltip_text(_("Click to unmute"))
else:
self.muteImage.set_from_icon_name("audio-volume-high-symbolic", 1)
self.label.set_label(self.baseTitle + str(self.volume) + "%")
self.muteSwitch.set_tooltip_text(_("Click to mute"))
class BalanceBar(Slider):
def __init__(self, type, minVal = -1, norm = 1, sizeGroup=None):
self.type = type
self.norm = norm
self.value = 0
if type == "balance":
title = _("Balance")
minLabel = _("Left")
maxLabel = _("Right")
elif type == "fade":
title = _("Fade")
minLabel = _("Rear")
maxLabel = _("Front")
elif type == "lfe":
title = _("Subwoofer")
minLabel = _("Soft")
maxLabel = _("Loud")
super(BalanceBar, self).__init__(title, minLabel, maxLabel, minVal, 1, sizeGroup, (1-minVal)/20.)
self.setMark(0)
self.slider.props.has_origin = False
self.adjustment.connect("value-changed", self.onLevelChanged)
def setChannelMap(self, channelMap):
self.channelMap = channelMap
self.channelMap.connect("volume-changed", self.getLevel)
self.set_sensitive(getattr(self.channelMap, "can_"+self.type)())
self.getLevel()
def getLevel(self, a=None, b=None):
value = round(getattr(self.channelMap, "get_"+self.type)(), 3)
if self.type == "lfe":
value = value / self.norm
if value == self.value:
return
self.value = value
self.adjustment.set_value(self.value)
def onLevelChanged(self, adjustment):
value = round(self.adjustment.get_value(), 3)
if self.value == value:
return
self.value = value
if self.type == "lfe":
value = value * self.norm
getattr(self.channelMap, "set_"+self.type)(value)
class VolumeLevelBar(SettingsWidget):
def __init__(self, sizeGroup):
super(VolumeLevelBar, self).__init__()
self.set_orientation(Gtk.Orientation.VERTICAL)
self.set_spacing(5)
self.lastPeak = 0
self.monitorId = None
self.stream = None
self.pack_start(Gtk.Label(_("Input level")), False, False, 0)
levelBox = Gtk.Box()
self.levelBar = Gtk.LevelBar()
leftPadding = Gtk.Box()
sizeGroup.add_widget(leftPadding)
rightPadding = Gtk.Box()
sizeGroup.add_widget(rightPadding)
levelBox.pack_start(leftPadding, False, False, 0)
levelBox.pack_start(self.levelBar, True, True, 0)
levelBox.pack_start(rightPadding, False, False, 0)
self.pack_start(levelBox, False, False, 5)
self.levelBar.set_min_value(0)
def setStream(self, stream):
if self.stream != None:
self.stream.remove_monitor()
self.stream.disconnect(self.monitorId)
self.stream = stream
self.stream.create_monitor()
self.monitorId = self.stream.connect("monitor-update", self.update)
def update(self, stream, value):
if self.lastPeak >= DECAY_STEP and value < self.lastPeak - DECAY_STEP:
value = self.lastPeak - DECAY_STEP
self.lastPeak = value
self.levelBar.set_value(value)
class ProfileSelector(SettingsWidget):
def __init__(self, controller):
super(ProfileSelector, self).__init__()
self.controller = controller
self.model = Gtk.ListStore(str, str)
self.combo = Gtk.ComboBox()
self.combo.set_model(self.model)
render = Gtk.CellRendererText()
self.combo.pack_start(render, True)
self.combo.add_attribute(render, "text", 1)
self.combo.set_id_column(0)
self.pack_start(Gtk.Label(_("Output profile")), False, False, 0)
button = Gtk.Button.new_with_label(_("Test sound"))
self.pack_end(button, False, False, 0)
self.pack_end(self.combo, False, False, 0)
button.connect("clicked", self.testSpeakers)
self.combo.connect("changed", self.onProfileSelect)
def setDevice(self, device):
self.device = device
# set the available output profiles in the combo box
profiles = device.get_profiles()
self.model.clear()
for profile in profiles:
self.model.append([profile.profile, profile.human_profile])
self.profile = device.get_active_profile()
self.combo.set_active_id(self.profile)
def onProfileSelect(self, a):
newProfile = self.combo.get_active_id()
if newProfile != self.profile and newProfile != None:
self.profile = newProfile
self.controller.change_profile_on_selected_device(self.device, newProfile)
def testSpeakers(self, a):
SoundTest(a.get_toplevel(), self.controller.get_default_sink())
class Effect(GSettingsSoundFileChooser):
def __init__(self, info, sizeGroup):
super(Effect, self).__init__(info["label"], info["schema"], info["file"])
self.enabled_key = info["enabled"]
self.enabled_switch = Gtk.Switch()
self.pack_end(self.enabled_switch, False, False, 0)
self.reorder_child(self.enabled_switch, 1)
sizeGroup.add_widget(self.content_widget)
self.settings.bind(self.enabled_key, self.enabled_switch, "active", Gio.SettingsBindFlags.DEFAULT)
class SoundTest(Gtk.Dialog):
def __init__(self, parent, stream):
Gtk.Dialog.__init__(self, _("Test Sound"), parent)
self.stream = stream
self.positions = []
grid = Gtk.Grid()
grid.set_column_spacing(75)
grid.set_row_spacing(75)
grid.set_column_homogeneous(True)
grid.set_row_homogeneous(True)
sizeGroup = Gtk.SizeGroup(Gtk.SizeGroupMode.BOTH)
index = 0
for position in SOUND_TEST_MAP:
container = Gtk.Box()
button = Gtk.Button()
sizeGroup.add_widget(button)
button.set_relief(Gtk.ReliefStyle.NONE)
box = Gtk.Box.new(Gtk.Orientation.VERTICAL, 0)
button.add(box)
icon = Gtk.Image.new_from_icon_name(position[2], Gtk.IconSize.DIALOG)
box.pack_start(icon, False, False, 0)
box.pack_start(Gtk.Label(position[0]), False, False, 0)
info = {"index":index, "icon":icon, "button":button}
button.connect("clicked", self.test, info)
container.add(button)
grid.attach(container, position[4], position[3], 1, 1)
index = index + 1
self.positions.append(info)
content_area = self.get_content_area()
content_area.set_border_width(12)
content_area.add(grid)
button = Gtk.Button.new_from_stock("gtk-close")
button.connect("clicked", self._destroy)
content_area.add(button)
self.show_all()
self.setPositionHideState()
def _destroy(self, widget):
self.destroy()
def test(self, b, info):
position = SOUND_TEST_MAP[info["index"]]
if position[1] == "lfe":
sound = "audio-test-signal"
else:
sound = "audio-channel-"+position[1]
try:
connection = Gio.bus_get_sync(Gio.BusType.SESSION, None)
connection.call_sync("org.cinnamon.SettingsDaemon.Sound",
"/org/cinnamon/SettingsDaemon/Sound",
"org.cinnamon.SettingsDaemon.Sound",
"PlaySoundWithChannel",
GLib.Variant("(uss)", (0, sound, position[1])),
None,
Gio.DBusCallFlags.NONE,
2000,
None)
except GLib.Error as e:
print("Could not play test sound: %s" % e.message)
def setPositionHideState(self):
map = self.stream.get_channel_map()
for position in self.positions:
index = position["index"]
if map.has_position(SOUND_TEST_MAP[index][5]):
position["button"].show()
else:
position["button"].hide()
class Module:
name = "sound"
category = "hardware"
comment = _("Manage sound settings")
def __init__(self, content_box):
keywords = _("sound, media, music, speakers, audio, microphone, headphone")
self.sidePage = SidePage(_("Sound"), "cs-sound", keywords, content_box, module=self)
self.sound_settings = Gio.Settings(CINNAMON_DESKTOP_SOUNDS)
def on_module_selected(self):
if not self.loaded:
print("Loading Sound module")
self.outputDeviceList = Gtk.ListStore(str, # name
str, # device
bool, # active
int, # id
GdkPixbuf.Pixbuf) # icon
self.inputDeviceList = Gtk.ListStore(str, # name
str, # device
bool, # active
int, # id
GdkPixbuf.Pixbuf) # icon
self.appList = {}
            self.initializeController()
self.buildLayout()
self.checkAppState()
self.checkInputState()
def buildLayout(self):
self.sidePage.stack = SettingsStack()
self.sidePage.add_widget(self.sidePage.stack)
## Output page
page = SettingsPage()
self.sidePage.stack.add_titled(page, "output", _("Output"))
self.outputSelector = self.buildDeviceSelect("output", self.outputDeviceList)
outputSection = page.add_section(_("Device"))
outputSection.add_row(self.outputSelector)
devSettings = page.add_section(_("Device settings"))
# output profiles
self.profile = ProfileSelector(self.controller)
devSettings.add_row(self.profile)
sizeGroup = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
        # output volume
max_volume = self.sound_settings.get_int(MAXIMUM_VOLUME_KEY)
self.outVolume = VolumeBar(self.controller.get_vol_max_norm(), max_volume, sizeGroup=sizeGroup)
devSettings.add_row(self.outVolume)
# balance
self.balance = BalanceBar("balance", sizeGroup=sizeGroup)
devSettings.add_row(self.balance)
self.fade = BalanceBar("fade", sizeGroup=sizeGroup)
devSettings.add_row(self.fade)
self.woofer = BalanceBar("lfe", 0, self.controller.get_vol_max_norm(), sizeGroup=sizeGroup)
devSettings.add_row(self.woofer)
## Input page
page = SettingsPage()
self.sidePage.stack.add_titled(page, "input", _("Input"))
self.inputStack = Gtk.Stack()
page.pack_start(self.inputStack, True, True, 0)
inputBox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=15)
self.inputSelector = self.buildDeviceSelect("input", self.inputDeviceList)
        deviceSection = SettingsSection(_("Device"))
inputBox.pack_start(deviceSection, False, False, 0)
deviceSection.add_row(self.inputSelector)
devSettings = SettingsSection(_("Device settings"))
inputBox.pack_start(devSettings, False, False, 0)
sizeGroup = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
# input volume
self.inVolume = VolumeBar(self.controller.get_vol_max_norm(), max_volume, sizeGroup=sizeGroup)
devSettings.add_row(self.inVolume)
# input level
self.inLevel = VolumeLevelBar(sizeGroup)
devSettings.add_row(self.inLevel)
self.inputStack.add_named(inputBox, "inputBox")
noInputsMessage = Gtk.Box()
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=12)
image = Gtk.Image.new_from_icon_name("action-unavailable-symbolic", Gtk.IconSize.DIALOG)
image.set_pixel_size(96)
box.pack_start(image, False, False, 0)
box.set_valign(Gtk.Align.CENTER)
label = Gtk.Label(_("No inputs sources are currently available."))
box.pack_start(label, False, False, 0)
noInputsMessage.pack_start(box, True, True, 0)
self.inputStack.add_named(noInputsMessage, "noInputsMessage")
self.inputStack.show_all()
## Sounds page
page = SettingsPage()
self.sidePage.stack.add_titled(page, "sounds", _("Sounds"))
soundsVolumeSection = page.add_section(_("Sounds Volume"))
self.soundsVolume = VolumeBar(self.controller.get_vol_max_norm(), 100)
soundsVolumeSection.add_row(self.soundsVolume)
soundsSection = SoundBox(_("Sounds"))
page.pack_start(soundsSection, True, True, 0)
sizeGroup = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
for effect in EFFECT_LIST:
soundsSection.add_row(Effect(effect, sizeGroup))
## Applications page
page = SettingsPage()
self.sidePage.stack.add_titled(page, "applications", _("Applications"))
self.appStack = Gtk.Stack()
page.pack_start(self.appStack, True, True, 0)
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
self.appSettings = SoundBox(_("Applications"))
box.pack_start(self.appSettings, True, True, 0)
self.appStack.add_named(box, "appSettings")
noAppsMessage = Gtk.Box()
box = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=12)
image = Gtk.Image.new_from_icon_name("action-unavailable-symbolic", Gtk.IconSize.DIALOG)
image.set_pixel_size(96)
box.pack_start(image, False, False, 0)
box.set_valign(Gtk.Align.CENTER)
label = Gtk.Label(_("No application is currently playing or recording audio."))
box.pack_start(label, False, False, 0)
noAppsMessage.pack_start(box, True, True, 0)
self.appStack.add_named(noAppsMessage, "noAppsMessage")
## Settings page
page = SettingsPage()
self.sidePage.stack.add_titled(page, "settings", _("Settings"))
amplificationSection = page.add_section(_("Amplification"))
self.maxVolume = Slider(_("Maximum volume: %d") % max_volume + "%", _("Reduced"), _("Amplified"), 1, 150, None, step=1, page=10, value=max_volume, gicon=None, iconName=None)
self.maxVolume.adjustment.connect("value-changed", self.onMaxVolumeChanged)
self.maxVolume.setMark(100)
amplificationSection.add_row(self.maxVolume)
def onMaxVolumeChanged(self, adjustment):
newValue = int(round(adjustment.get_value()))
self.sound_settings.set_int(MAXIMUM_VOLUME_KEY, newValue)
self.maxVolume.label.set_label(_("Maximum volume: %d") % newValue + "%")
self.outVolume.adjustment.set_upper(newValue)
self.outVolume.slider.clear_marks()
if (newValue > 100):
self.outVolume.setMark(100)
    def initializeController(self):
self.controller = Cvc.MixerControl(name = "cinnamon")
self.controller.connect("state-changed", self.setChannelMap)
self.controller.connect("output-added", self.deviceAdded, "output")
self.controller.connect("input-added", self.deviceAdded, "input")
self.controller.connect("output-removed", self.deviceRemoved, "output")
self.controller.connect("input-removed", self.deviceRemoved, "input")
self.controller.connect("active-output-update", self.activeOutputUpdate)
self.controller.connect("active-input-update", self.activeInputUpdate)
self.controller.connect("default-sink-changed", self.defaultSinkChanged)
self.controller.connect("default-source-changed", self.defaultSourceChanged)
self.controller.connect("stream-added", self.streamAdded)
self.controller.connect("stream-removed", self.streamRemoved)
self.controller.open()
def buildDeviceSelect(self, type, model):
select = Gtk.IconView.new_with_model(model)
select.set_margin(0)
select.set_pixbuf_column(4)
select.set_text_column(0)
select.set_column_spacing(0)
select.connect("selection-changed", self.setActiveDevice, type)
return select
def setActiveDevice(self, view, type):
selected = view.get_selected_items()
if len(selected) == 0:
return
model = view.get_model()
newDeviceId = model.get_value(model.get_iter(selected[0]), 3)
newDevice = getattr(self.controller, "lookup_"+type+"_id")(newDeviceId)
if newDevice != None and newDeviceId != getattr(self, type+"Id"):
getattr(self.controller, "change_"+type)(newDevice)
self.profile.setDevice(newDevice)
def deviceAdded(self, c, id, type):
device = getattr(self.controller, "lookup_"+type+"_id")(id)
iconTheme = Gtk.IconTheme.get_default()
gicon = device.get_gicon()
iconName = device.get_icon_name()
icon = None
if gicon is not None:
lookup = iconTheme.lookup_by_gicon(gicon, 32, 0)
if lookup is not None:
icon = lookup.load_icon()
if icon is None:
if (iconName is not None and "bluetooth" in iconName):
icon = iconTheme.load_icon("bluetooth", 32, 0)
else:
icon = iconTheme.load_icon("audio-card", 32, 0)
getattr(self, type+"DeviceList").append([device.get_description() + "\n" + device.get_origin(), "", False, id, icon])
if type == "input":
self.checkInputState()
def deviceRemoved(self, c, id, type):
store = getattr(self, type+"DeviceList")
for row in store:
if row[3] == id:
store.remove(row.iter)
if type == "input":
self.checkInputState()
return
def checkInputState(self):
if len(self.inputDeviceList) == 0:
self.inputStack.set_visible_child_name("noInputsMessage")
else:
self.inputStack.set_visible_child_name("inputBox")
def activeOutputUpdate(self, c, id):
self.outputId = id
device = self.controller.lookup_output_id(id)
self.profile.setDevice(device)
# select current device in device selector
i = 0
for row in self.outputDeviceList:
if row[3] == id:
self.outputSelector.select_path(Gtk.TreePath.new_from_string(str(i)))
i = i + 1
self.setChannelMap()
def activeInputUpdate(self, c, id):
self.inputId = id
# select current device in device selector
i = 0
for row in self.inputDeviceList:
if row[3] == id:
self.inputSelector.select_path(Gtk.TreePath.new_from_string(str(i)))
i = i + 1
def defaultSinkChanged(self, c, id):
defaultSink = self.controller.get_default_sink()
if defaultSink == None:
return
self.outVolume.setStream(defaultSink)
self.setChannelMap()
def defaultSourceChanged(self, c, id):
defaultSource = self.controller.get_default_source()
if defaultSource == None:
return
self.inVolume.setStream(defaultSource)
self.inLevel.setStream(defaultSource)
def setChannelMap(self, a=None, b=None):
if self.controller.get_state() == Cvc.MixerControlState.READY:
channelMap = self.controller.get_default_sink().get_channel_map()
self.balance.setChannelMap(channelMap)
self.fade.setChannelMap(channelMap)
self.woofer.setChannelMap(channelMap)
def streamAdded(self, c, id):
stream = self.controller.lookup_stream_id(id)
if stream in self.controller.get_sink_inputs():
name = stream.props.name
            # FIXME: We used to filter out by PA_PROP_APPLICATION_ID. But
# most streams report this as null now... why??
if name in ("speech-dispatcher", "libcanberra"):
# speech-dispatcher: orca/speechd/spd-say
# libcanberra: cinnamon effects, test sounds
return
if id in self.appList.keys():
# Don't add an input more than once
return
if name == None:
name = _("Unknown")
label = "%s: " % name
self.appList[id] = VolumeBar(self.controller.get_vol_max_norm(),
100,
label,
stream.get_gicon())
self.appList[id].setStream(stream)
self.appSettings.add_row(self.appList[id])
self.appSettings.list_box.invalidate_headers()
self.appSettings.show_all()
elif stream == self.controller.get_event_sink_input():
self.soundsVolume.setStream(stream)
self.checkAppState()
def streamRemoved(self, c, id):
if id in self.appList:
self.appList[id].get_parent().destroy()
self.appSettings.list_box.invalidate_headers()
del self.appList[id]
self.checkAppState()
def checkAppState(self):
if len(self.appList) == 0:
self.appStack.set_visible_child_name("noAppsMessage")
else:
self.appStack.set_visible_child_name("appSettings")
|
glls/Cinnamon
|
files/usr/share/cinnamon/cinnamon-settings/modules/cs_sound.py
|
Python
|
gpl-2.0
| 32,421
|
[
"ORCA"
] |
388fabf8b0ce13f4362571d7223701ac6e9fe2550328273e517059440b6f6357
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Array of data parsers for bioinformatics file formats, such as: GFF3, BED, SAM/BAM, VCF, PSL, AGP, FASTA/FASTQ, BLAST, etc.
"""
from jcvi.apps.base import dmain
if __name__ == "__main__":
dmain(__file__)
|
tanghaibao/jcvi
|
jcvi/formats/__main__.py
|
Python
|
bsd-2-clause
| 262
|
[
"BLAST"
] |
aa010f3240c71b0bf121a2fec26f4d981c7fbe7b7a79471d9013e576d70ab043
|
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Densitymapbox(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "densitymapbox"
_valid_props = {
"autocolorscale",
"below",
"coloraxis",
"colorbar",
"colorscale",
"customdata",
"customdatasrc",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"lat",
"latsrc",
"legendgroup",
"lon",
"lonsrc",
"meta",
"metasrc",
"name",
"opacity",
"radius",
"radiussrc",
"reversescale",
"showlegend",
"showscale",
"stream",
"subplot",
"text",
"textsrc",
"type",
"uid",
"uirevision",
"visible",
"z",
"zauto",
"zmax",
"zmid",
"zmin",
"zsrc",
}
# autocolorscale
# --------------
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
# below
# -----
@property
def below(self):
"""
Determines if the densitymapbox trace will be inserted before
the layer with the specified ID. By default, densitymapbox
traces are placed below the first layer of type symbol If set
to '', the layer will be inserted above every existing layer.
The 'below' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["below"]
@below.setter
def below(self, val):
self["below"] = val
# coloraxis
# ---------
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
# colorbar
# --------
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Supported dict properties:
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
Sets the width (in px) or the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                            Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                or in "pixels". Use `len` to set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
                            last tick is displayed with a prefix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                            Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format
We add one item to d3's date formatter: "%{n}f"
for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with
tickformat "%H~%M~%S.%2f" would display
"09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.density
mapbox.colorbar.Tickformatstop` instances or
dicts with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.densitymapbox.colorbar.tickformatstopdefaults
), sets the default property values to use for
elements of
densitymapbox.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
for ticktext .
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
for tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.densitymapbox.colo
rbar.Title` instance or dict with compatible
properties
titlefont
Deprecated: Please use
densitymapbox.colorbar.title.font instead. Sets
this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
titleside
Deprecated: Please use
densitymapbox.colorbar.title.side instead.
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
plotly.graph_objs.densitymapbox.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
# colorscale
# ----------
@property
def colorscale(self):
"""
Sets the colorscale. The colorscale must be an array containing
arrays mapping a normalized value to an rgb, rgba, hex, hsl,
hsv, or named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To control the
bounds of the colorscale in color space, use`zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name string of the
following list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,Reds,Bl
ues,Picnic,Rainbow,Portland,Jet,Hot,Blackbody,Earth,Electric,Vi
ridis,Cividis.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'peach', 'phase', 'picnic', 'pinkyl', 'piyg',
'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn', 'puor',
'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu', 'rdgy',
'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar', 'spectral',
'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn', 'tealrose',
'tempo', 'temps', 'thermal', 'tropic', 'turbid', 'twilight',
'viridis', 'ylgn', 'ylgnbu', 'ylorbr', 'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that
"scatter" traces also append customdata items in the markers'
DOM elements.
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['lon', 'lat', 'z', 'text', 'name'] joined with '+' characters
(e.g. 'lon+lat')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.densitymapbox.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for details on
the date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data described at
this link https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be specified per-
point (the ones that are `arrayOk: true`) are available.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example "<extra>{fullData.name}</extra>". To
hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (lon,lat) pair. If
a single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order to
this trace's (lon,lat) coordinates. To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids are used for object
constancy of data points during animation. They should be an
array of strings, not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# lat
# ---
@property
def lat(self):
"""
Sets the latitude coordinates (in degrees North).
The 'lat' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["lat"]
@lat.setter
def lat(self, val):
self["lat"] = val
# latsrc
# ------
@property
def latsrc(self):
"""
Sets the source reference on Chart Studio Cloud for lat .
The 'latsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["latsrc"]
@latsrc.setter
def latsrc(self, val):
self["latsrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# lon
# ---
@property
def lon(self):
"""
Sets the longitude coordinates (in degrees East).
The 'lon' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["lon"]
@lon.setter
def lon(self, val):
self["lon"] = val
# lonsrc
# ------
@property
def lonsrc(self):
"""
Sets the source reference on Chart Studio Cloud for lon .
The 'lonsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["lonsrc"]
@lonsrc.setter
def lonsrc(self, val):
self["lonsrc"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text` `rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n[.meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for meta .
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# radius
# ------
@property
def radius(self):
"""
Sets the radius of influence of one `lon` / `lat` point in
pixels. Increasing the value makes the densitymapbox trace
smoother, but less detailed.
The 'radius' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["radius"]
@radius.setter
def radius(self, val):
self["radius"] = val
# radiussrc
# ---------
@property
def radiussrc(self):
"""
Sets the source reference on Chart Studio Cloud for radius .
The 'radiussrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["radiussrc"]
@radiussrc.setter
def radiussrc(self, val):
self["radiussrc"] = val
# reversescale
# ------------
@property
def reversescale(self):
"""
Reverses the color mapping if true. If true, `zmin` will
correspond to the last color in the array and `zmax` will
correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# showscale
# ---------
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.densitymapbox.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.densitymapbox.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# subplot
# -------
@property
def subplot(self):
"""
Sets a reference between this trace's data coordinates and a
mapbox subplot. If "mapbox" (the default value), the data refer
to `layout.mapbox`. If "mapbox2", the data refer to
`layout.mapbox2`, and so on.
The 'subplot' property is an identifier of a particular
subplot, of type 'mapbox', that may be specified as the string 'mapbox'
optionally followed by an integer >= 1
(e.g. 'mapbox', 'mapbox1', 'mapbox2', 'mapbox3', etc.)
Returns
-------
str
"""
return self["subplot"]
@subplot.setter
def subplot(self, val):
self["subplot"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (lon,lat) pair. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order to
this trace's (lon,lat) coordinates. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set, these
elements will be seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for text .
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace. Use it to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# z
# -
@property
def z(self):
"""
Sets the points' weight. For example, a value of 10 would be
equivalent to having 10 points of weight 1 in the same spot
The 'z' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["z"]
@z.setter
def z(self, val):
self["z"] = val
# zauto
# -----
@property
def zauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `z`) or the bounds set in
`zmin` and `zmax` Defaults to `false` when `zmin` and `zmax`
are set by the user.
The 'zauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zauto"]
@zauto.setter
def zauto(self, val):
self["zauto"] = val
# zmax
# ----
@property
def zmax(self):
"""
Sets the upper bound of the color domain. Value should have the
same units as in `z` and if set, `zmin` must be set as well.
The 'zmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmax"]
@zmax.setter
def zmax(self, val):
self["zmax"] = val
# zmid
# ----
@property
def zmid(self):
"""
Sets the mid-point of the color domain by scaling `zmin` and/or
`zmax` to be equidistant to this point. Value should have the
same units as in `z`. Has no effect when `zauto` is `false`.
The 'zmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmid"]
@zmid.setter
def zmid(self, val):
self["zmid"] = val
# zmin
# ----
@property
def zmin(self):
"""
Sets the lower bound of the color domain. Value should have the
same units as in `z` and if set, `zmax` must be set as well.
The 'zmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zmin"]
@zmin.setter
def zmin(self, val):
self["zmin"] = val
# zsrc
# ----
@property
def zsrc(self):
"""
Sets the source reference on Chart Studio Cloud for z .
The 'zsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["zsrc"]
@zsrc.setter
def zsrc(self, val):
self["zsrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
below
Determines if the densitymapbox trace will be inserted
before the layer with the specified ID. By default,
densitymapbox traces are placed below the first layer
of type symbol If set to '', the layer will be inserted
above every existing layer.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.densitymapbox.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.densitymapbox.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (lon,lat)
pair If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to the this trace's (lon,lat)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on Chart Studio Cloud for
lat .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on Chart Studio Cloud for
lon .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
radius
Sets the radius of influence of one `lon` / `lat` point
in pixels. Increasing the value makes the densitymapbox
trace smoother, but less detailed.
radiussrc
Sets the source reference on Chart Studio Cloud for
radius .
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.densitymapbox.Stream`
instance or dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a mapbox subplot. If "mapbox" (the default value),
the data refer to `layout.mapbox`. If "mapbox2", the
data refer to `layout.mapbox2`, and so on.
text
Sets text elements associated with each (lon,lat) pair
If a single string, the same string appears over all
the data points. If an array of string, the items are
mapped in order to the this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the points' weight. For example, a value of 10
would be equivalent to having 10 points of weight 1 in
the same spot
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for z
.
"""
def __init__(
self,
arg=None,
autocolorscale=None,
below=None,
coloraxis=None,
colorbar=None,
colorscale=None,
customdata=None,
customdatasrc=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
lat=None,
latsrc=None,
legendgroup=None,
lon=None,
lonsrc=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
radius=None,
radiussrc=None,
reversescale=None,
showlegend=None,
showscale=None,
stream=None,
subplot=None,
text=None,
textsrc=None,
uid=None,
uirevision=None,
visible=None,
z=None,
zauto=None,
zmax=None,
zmid=None,
zmin=None,
zsrc=None,
**kwargs
):
"""
Construct a new Densitymapbox object
Draws a bivariate kernel density estimation with a Gaussian
kernel from `lon` and `lat` coordinates and optional `z` values
using a colorscale.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Densitymapbox`
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`colorscale`. In case `colorscale` is unspecified or
`autocolorscale` is true, the default palette will be
chosen according to whether numbers in the `color`
array are all positive, all negative or mixed.
below
Determines if the densitymapbox trace will be inserted
before the layer with the specified ID. By default,
densitymapbox traces are placed below the first layer
of type symbol If set to '', the layer will be inserted
above every existing layer.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.densitymapbox.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. The colorscale must be an array
containing arrays mapping a normalized value to an rgb,
rgba, hex, hsl, hsv, or named color string. At minimum,
a mapping for the lowest (0) and highest (1) values are
required. For example, `[[0, 'rgb(0,0,255)'], [1,
'rgb(255,0,0)']]`. To control the bounds of the
colorscale in color space, use`zmin` and `zmax`.
Alternatively, `colorscale` may be a palette name
string of the following list: Greys,YlGnBu,Greens,YlOrR
d,Bluered,RdBu,Reds,Blues,Picnic,Rainbow,Portland,Jet,H
ot,Blackbody,Earth,Electric,Viridis,Cividis.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.densitymapbox.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. Anything contained in tag `<extra>` is
displayed in the secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each (lon,lat)
pair If a single string, the same string appears over
all the data points. If an array of string, the items
are mapped in order to the this trace's (lon,lat)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
lat
Sets the latitude coordinates (in degrees North).
latsrc
Sets the source reference on Chart Studio Cloud for
lat .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
lon
Sets the longitude coordinates (in degrees East).
lonsrc
Sets the source reference on Chart Studio Cloud for
lon .
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
radius
Sets the radius of influence of one `lon` / `lat` point
in pixels. Increasing the value makes the densitymapbox
trace smoother, but less detailed.
radiussrc
Sets the source reference on Chart Studio Cloud for
radius .
reversescale
Reverses the color mapping if true. If true, `zmin`
will correspond to the last color in the array and
`zmax` will correspond to the first color.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
showscale
Determines whether or not a colorbar is displayed for
this trace.
stream
:class:`plotly.graph_objects.densitymapbox.Stream`
instance or dict with compatible properties
subplot
Sets a reference between this trace's data coordinates
and a mapbox subplot. If "mapbox" (the default value),
the data refer to `layout.mapbox`. If "mapbox2", the
data refer to `layout.mapbox2`, and so on.
text
Sets text elements associated with each (lon,lat) pair
If a single string, the same string appears over all
the data points. If an array of string, the items are
mapped in order to the this trace's (lon,lat)
coordinates. If trace `hoverinfo` contains a "text"
flag and "hovertext" is not set, these elements will be
seen in the hover labels.
textsrc
Sets the source reference on Chart Studio Cloud for
text .
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
z
Sets the points' weight. For example, a value of 10
would be equivalent to having 10 points of weight 1 in
the same spot
zauto
Determines whether or not the color domain is computed
with respect to the input data (here in `z`) or the
bounds set in `zmin` and `zmax` Defaults to `false`
when `zmin` and `zmax` are set by the user.
zmax
Sets the upper bound of the color domain. Value should
have the same units as in `z` and if set, `zmin` must
be set as well.
zmid
Sets the mid-point of the color domain by scaling
`zmin` and/or `zmax` to be equidistant to this point.
Value should have the same units as in `z`. Has no
effect when `zauto` is `false`.
zmin
Sets the lower bound of the color domain. Value should
have the same units as in `z` and if set, `zmax` must
be set as well.
zsrc
Sets the source reference on Chart Studio Cloud for z
.
Returns
-------
Densitymapbox
"""
super(Densitymapbox, self).__init__("densitymapbox")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Densitymapbox
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Densitymapbox`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("autocolorscale", None)
_v = autocolorscale if autocolorscale is not None else _v
if _v is not None:
self["autocolorscale"] = _v
_v = arg.pop("below", None)
_v = below if below is not None else _v
if _v is not None:
self["below"] = _v
_v = arg.pop("coloraxis", None)
_v = coloraxis if coloraxis is not None else _v
if _v is not None:
self["coloraxis"] = _v
_v = arg.pop("colorbar", None)
_v = colorbar if colorbar is not None else _v
if _v is not None:
self["colorbar"] = _v
_v = arg.pop("colorscale", None)
_v = colorscale if colorscale is not None else _v
if _v is not None:
self["colorscale"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("lat", None)
_v = lat if lat is not None else _v
if _v is not None:
self["lat"] = _v
_v = arg.pop("latsrc", None)
_v = latsrc if latsrc is not None else _v
if _v is not None:
self["latsrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("lon", None)
_v = lon if lon is not None else _v
if _v is not None:
self["lon"] = _v
_v = arg.pop("lonsrc", None)
_v = lonsrc if lonsrc is not None else _v
if _v is not None:
self["lonsrc"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("radius", None)
_v = radius if radius is not None else _v
if _v is not None:
self["radius"] = _v
_v = arg.pop("radiussrc", None)
_v = radiussrc if radiussrc is not None else _v
if _v is not None:
self["radiussrc"] = _v
_v = arg.pop("reversescale", None)
_v = reversescale if reversescale is not None else _v
if _v is not None:
self["reversescale"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("showscale", None)
_v = showscale if showscale is not None else _v
if _v is not None:
self["showscale"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("subplot", None)
_v = subplot if subplot is not None else _v
if _v is not None:
self["subplot"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("z", None)
_v = z if z is not None else _v
if _v is not None:
self["z"] = _v
_v = arg.pop("zauto", None)
_v = zauto if zauto is not None else _v
if _v is not None:
self["zauto"] = _v
_v = arg.pop("zmax", None)
_v = zmax if zmax is not None else _v
if _v is not None:
self["zmax"] = _v
_v = arg.pop("zmid", None)
_v = zmid if zmid is not None else _v
if _v is not None:
self["zmid"] = _v
_v = arg.pop("zmin", None)
_v = zmin if zmin is not None else _v
if _v is not None:
self["zmin"] = _v
_v = arg.pop("zsrc", None)
_v = zsrc if zsrc is not None else _v
if _v is not None:
self["zsrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "densitymapbox"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
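# Usage sketch (hypothetical data; "open-street-map" is a token-free base
# style): a complete figure with one Densitymapbox trace.
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Densitymapbox(lat=[52.37, 52.38], lon=[4.90, 4.91],
#                                    z=[1, 3], radius=25))
#   fig.update_layout(mapbox_style="open-street-map",
#                     mapbox_center={"lat": 52.37, "lon": 4.90},
#                     mapbox_zoom=10)
#   fig.show()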
|
plotly/python-api
|
packages/python/plotly/plotly/graph_objs/_densitymapbox.py
|
Python
|
mit
| 74,226
|
[
"Gaussian"
] |
67cb174749fe3b3d36a62ad558489d11d21e5ce85da752c450034f81648fc0b9
|
import pandas as pd
import itertools as it
import seaborn as sns
import numpy as np
from pymea import matlab_compatibility as mc
from matplotlib import pyplot as plt
from matplotlib import mlab as mlab
import random
from datetime import datetime, timedelta
from pymea import supplement_to_plotting as psupp
import math
def plot_units_from_spike_table(spike_table):
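"""
Plots every unit in spike_table in its own figure: the 'time' column is parsed
into datetimes and each remaining column is treated as one unit's trace.
"""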
time_vector = spike_table['time'].map(mc.datetime_str_to_datetime)
unit_table = spike_table.copy()
del unit_table['time']
num_units = len(unit_table.columns)
#plt.figure(figsize=(10, 0.1 * num_units))
for i, unit_name in enumerate(unit_table.columns):
#plt.subplot(num_units, 1, i + 1)
plt.figure()
plot_unit(time_vector, unit_table[unit_name])
plt.xlabel(unit_name)
def smooth(A, kernel_size=5, mode='same'):
"""
Computes the moving average of A using a kernel_size kernel.
"""
kernel = np.ones(kernel_size)/kernel_size
return np.convolve(A, kernel, mode=mode)
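# Usage sketch (hypothetical signal): a 5-sample moving average of a noisy
# sine wave, plotted over the raw trace.
#
#   noisy = np.sin(np.linspace(0, 4 * np.pi, 200)) + np.random.normal(0, 0.3, 200)
#   plt.plot(noisy, alpha=0.4)
#   plt.plot(smooth(noisy, kernel_size=5))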
def plot_unit_traces(category_dataframe, yscale = 'linear', **plot_kwargs):
"""
Plots spike frequency unit traces for each neural unit in the provided category dataframe
"""
for unit in category_dataframe['unit_name'].unique():
unit_table = category_dataframe.query('unit_name == @unit')
plt.plot(unit_table['time'], unit_table['spike_freq'], **plot_kwargs)
plt.yscale(yscale)
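# Usage sketch: the category dataframe is expected to hold one row per unit
# per time point with at least 'unit_name', 'time', 'spike_freq' and
# 'condition' columns (names inferred from the queries in this module; the
# values below are hypothetical).
#
#   demo = pd.DataFrame({
#       'unit_name': ['u1', 'u1', 'u2', 'u2'],
#       'time': pd.to_datetime(['2020-01-01 00:00:00', '2020-01-01 00:05:00'] * 2),
#       'spike_freq': [0.5, 0.8, 1.2, 1.1],
#       'condition': ['control'] * 4,
#   })
#   plot_unit_traces(demo, yscale='log', color='gray')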
def plot_unit_traces_plus_means(category_dataframe, yscale = 'linear', repeated = False, data_col = 'spike_freq', alt_x = False, x_label = 'Time (days)', title = 'Unit Traces and Mean', **plot_kwargs):
"""
Plots spike frequency unit traces for each neural unit in the provided category dataframe, along with
the mean trace (in black)
"""
time_days = (category_dataframe['time']-category_dataframe['time'].iloc[0]).map(lambda x: x.days)
time_seconds = (category_dataframe['time']-category_dataframe['time'].iloc[0]).map(lambda x: x.seconds)
time_vector = (time_days + (time_seconds/3600/24)).unique()
for unit in category_dataframe['unit_name'].unique():
unit_table = category_dataframe.query('unit_name == @unit')
if repeated == True:
time_vector = unit_table['time']
plt.plot(time_vector, unit_table[data_col], alpha=0.4, **plot_kwargs)
mean_freq_traces = category_dataframe.groupby(('condition', 'time'))[data_col].mean()
mean_freq_traces = mean_freq_traces.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
for condition in mean_freq_traces['condition'].unique():
condition_trace = mean_freq_traces.query('condition == @condition')
if repeated == True:
time_vector = condition_trace['time']
plt.plot(time_vector, condition_trace[data_col], 'k')
plt.yscale(yscale)
plt.xlabel(x_label)
plt.ylabel(data_col)
plt.title(title)
plt.legend(mean_freq_traces['condition'].unique())
def plot_unit_traces_plus_medians(category_dataframe, yscale = 'linear', data_col = 'spike_freq', alt_x = False, x_label = 'time', title = 'Spike Frequency Traces', **plot_kwargs):
"""
Plots spike frequency unit traces for each neural unit in the provided category dataframe, along with
the median trace (in black)
"""
time_days = (category_dataframe['time']-category_dataframe['time'].iloc[0]).map(lambda x: x.days)
time_seconds = (category_dataframe['time']-category_dataframe['time'].iloc[0]).map(lambda x: x.seconds)
time_vector = (time_days + (time_seconds/3600/24)).unique()
for unit in category_dataframe['unit_name'].unique():
unit_table = category_dataframe.query('unit_name == @unit')
plt.plot(time_vector, unit_table[data_col], **plot_kwargs)
mean_freq_traces = category_dataframe.groupby(('condition', 'time'))[data_col].median()
mean_freq_traces = mean_freq_traces.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
for condition in mean_freq_traces['condition'].unique():
condition_trace = mean_freq_traces.query('condition == @condition')
plt.plot(time_vector, condition_trace[data_col], 'k')
plt.yscale(yscale)
plt.xlabel(x_label)
plt.ylabel('spike frequency')
plt.title(title)
plt.legend(mean_freq_traces['condition'].unique())
def plot_unit_points_plus_means(category_dataframe, title, divide_fn, **plot_kwargs):
"""
Plots spike frequency points for each neural unit in the provided category dataframe, in each section returned by
divide_fn. Mean of all units of the same category for each section is also shown.
"""
color_map = plt.cm.get_cmap('viridis', category_dataframe['condition'].unique().size)
color_index = 0
for cond in category_dataframe['condition'].unique():
cond_table = category_dataframe.query('condition == @cond')
cond_table = cond_table.reset_index()
# Get spks/pulse for each neuron in each time period
unit_table = cond_table.groupby(('unit_name', lambda x: divide_fn(cond_table, x, 'time')))['spike_freq'].mean()
unit_table = unit_table.rename('spike frequency').reset_index() # Convert the multiindexed series back to a dataframe
# Get average spks/pulse for all neurons in each time period
mean_table = unit_table.groupby('level_1')['spike frequency'].mean()
mean_table = mean_table.reset_index() # Convert the multiindexed series back to a dataframe
plt.plot(unit_table['level_1'].astype('int')*3, unit_table['spike frequency'], 'o', color = color_map(color_index), markerfacecolor = 'none', alpha = 0.25, label = '_nolegend_')
plt.plot(mean_table['level_1'].astype('int')*3, mean_table['spike frequency'], 'o', color = color_map(color_index))
color_index += 1 # Move to next color for next condition
plt.axhline(y=1, xmin=0, xmax=1, color='black')
plt.legend(category_dataframe['condition'].unique())
plt.xlabel('hours since start of 1st recording')
plt.ylabel('spikes/pulse')
plt.title(title)
def average_timecourse_plot(category_dataframe, **kwargs):
"""
Generates an average timecourse with error bars for each category in category_dataframe
see construct_categorized_dataframe for details on generateing the category_dataframe
"""
sns.pointplot(x='time', y='spike_freq', hue='condition', data=category_dataframe, **kwargs)
def avg_timecourse_plot_2(category_dataframe, **kwargs):
"""Plots the mean spike frequency per condition over time with standard-deviation error bars."""
stats = category_dataframe.groupby(['condition', 'time'])['spike_freq'].agg(['mean', 'std']).reset_index()
for condition in stats['condition'].unique():
cond_stats = stats.query('condition == @condition')
plt.errorbar(cond_stats['time'], cond_stats['mean'], yerr=cond_stats['std'], **kwargs)
plt.legend(stats['condition'].unique())
def plot_unit_frequency_distributions(category_dataframe, **kwargs):
"""
Plots the distribution of mean frequencies for units in each condition
"""
mean_freqs_by_condition = category_dataframe.groupby(('condition', 'unit_name'))['spike_freq'].mean()
mean_freqs_by_condition = mean_freqs_by_condition.rename('mean_freq').reset_index()
for condition in mean_freqs_by_condition['condition'].unique():
sns.distplot(mean_freqs_by_condition.query('condition == @condition')['mean_freq'].map(np.log), bins=100)
def plot_mean_frequency_traces(category_dataframe, data_col = 'spike_freq', alt_x = False, x_label = 'time', title = 'Mean Traces', Ret = False, **kwargs):
"""
Plots the mean frequency trace for each condition in category_dataframe
"""
mean_freq_traces = category_dataframe.groupby(('condition', 'time'))[data_col].mean()
mean_freq_traces = mean_freq_traces.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
for condition in mean_freq_traces['condition'].unique():
condition_trace = mean_freq_traces.query('condition == @condition')
if alt_x is False:
plt.plot(condition_trace['time'], condition_trace[data_col])
else:
plt.plot(alt_x, condition_trace[data_col])
plt.xlabel(x_label)
plt.ylabel(data_col)
plt.title(title)
plt.legend(mean_freq_traces['condition'].unique())
if Ret == True:
return mean_freq_traces
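# Usage sketch: with Ret=True the per-condition mean traces come back as a
# tidy dataframe for further analysis (using the hypothetical 'demo' dataframe
# sketched after plot_unit_traces above).
#
#   mean_traces = plot_mean_frequency_traces(demo, title='Mean spike frequency', Ret=True)
#   print(mean_traces.head())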
def plot_median_frequency_traces(category_dataframe, yscale = 'linear', quartiles = True, data_col = 'spike_freq', alt_x = False, x_label = 'time', **kwargs):
"""
Plots the median frequency trace for each condition in category_dataframe
"""
median_freq_traces = category_dataframe.groupby(('condition', 'time'))[data_col].median()
Q1 = category_dataframe.groupby(('condition', 'time'))[data_col].quantile(.25)
Q3 = category_dataframe.groupby(('condition', 'time'))[data_col].quantile(.75)
median_freq_traces = median_freq_traces.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
Q1 = Q1.rename(data_col).reset_index()
Q3 = Q3.rename(data_col).reset_index()
for condition in median_freq_traces['condition'].unique():
condition_trace = median_freq_traces.query('condition == @condition')
if alt_x is False:
ct = plt.plot(condition_trace['time'], condition_trace[data_col])
else:
ct = plt.plot(alt_x, condition_trace[data_col])
if quartiles == True:
Q1_trace = Q1.query('condition == @condition')
Q3_trace = Q3.query('condition == @condition')
if alt_x is False:
plt.plot(Q1_trace['time'], Q1_trace[data_col], '--', color = ct[0].get_color())
plt.plot(Q3_trace['time'], Q3_trace[data_col], '--', color = ct[0].get_color())
else:
plt.plot(alt_x, Q1_trace[data_col], '--', color = ct[0].get_color())
plt.plot(alt_x, Q3_trace[data_col], '--', color = ct[0].get_color())
plt.yscale(yscale)
plt.xlabel(x_label)
plt.ylabel('spike frequency')
plt.title('Median Spike Frequency Traces')
plt.legend(median_freq_traces['condition'].unique())
def get_median_unit_traces(category_dataframe):
"""
Finds the unit traces in category_dataframe with the median average firing rate
"""
overall_mean_freq = category_dataframe.groupby(('unit_name', 'condition'))['spike_freq'].mean()
overall_mean_freq = overall_mean_freq.rename('spike_freq').reset_index() # Convert the multiindexed series back to a dataframe
median_traces = pd.DataFrame()
for condition in overall_mean_freq['condition'].unique():
condition_trace = overall_mean_freq.query('condition == @condition')
n = len(condition_trace['spike_freq'])
if n%2 == 0:
sorted_freq = sorted(condition_trace['spike_freq'])
median_freq = sorted_freq[n//2 - 1]
else:
median_freq = np.median(condition_trace['spike_freq'])
median_unit = condition_trace[condition_trace.spike_freq == median_freq]['unit_name']
median_unit.reset_index(drop=True, inplace = True)
median_unit = median_unit.iloc[0]
median_traces = pd.concat([median_traces, category_dataframe.query('unit_name == @median_unit')])
return median_traces
def plot_median_unit_frequency_traces(category_dataframe, rec_starts, rec_ends, yscale = 'linear', **kwargs):
"""
Plots the frequency trace of the unit with the median avg spike freq for each condition in category_dataframe
"""
median_traces = get_median_unit_traces(category_dataframe)
    num_rec = len(rec_starts)
    for cond in median_traces['condition'].unique():
        plot_unit_means_per_rec(median_traces.query('condition == @cond'), rec_starts, rec_ends, num_rec, yscale)
    plt.ylabel('spike frequency')
    plt.title('Median Unit Spike Frequency Traces')
    plt.legend(median_traces['condition'].unique())
def plot_unit_means_per_rec(category_dataframe, rec_starts, rec_ends, num_rec, yscale = 'linear', **plot_kwargs):
"""
Plots the mean firing of each unit per recording session
"""
mean_unit_freq = pd.DataFrame()
for index in range(0,num_rec):
start1 = rec_starts[index]
end1 = rec_ends[index]
rec_table = category_dataframe.query('time >= @start1 and time <= @end1')
rec_mean_unit_freq = rec_table.groupby('unit_name')['spike_freq'].mean()
num_units = rec_mean_unit_freq.count()
start_dt = datetime.strptime(start1, "%Y-%m-%d %H:%M:%S").date()
start_times = pd.Series([start1]*num_units, index = rec_mean_unit_freq.index)
rec_data = pd.DataFrame({"mean_freq": rec_mean_unit_freq, "start_time": start_times})
del rec_data.index.name
rec_data.reset_index()
rec_data['unit_name'] = rec_mean_unit_freq.index
mean_unit_freq = pd.concat([mean_unit_freq, rec_data])
for unit in mean_unit_freq['unit_name'].unique():
date_table = mean_unit_freq.query('unit_name == @unit')
plt.plot_date(date_table['start_time'], date_table['mean_freq'], '-o')
plt.yscale(yscale)
plt.xlabel('time')
plt.ylabel('mean spike frequency')
plt.title('Mean Spike Frequency Per Recording')
def plot_means_per_rec(category_dataframe, rec_starts, rec_ends, num_rec, yscale = 'linear', **plot_kwargs):
"""
Plots the mean firing of each condition per recording session
"""
mean_unit_freq = pd.DataFrame()
for index in range(0,num_rec):
start1 = rec_starts[index]
end1 = rec_ends[index]
rec_table = category_dataframe.query('time >= @start1 and time <= @end1')
rec_mean_unit_freq = rec_table.groupby('condition')['spike_freq'].mean()
num_units = rec_mean_unit_freq.count()
start_dt = datetime.strptime(start1, "%Y-%m-%d %H:%M:%S").date()
start_times = pd.Series([start1]*num_units, index = rec_mean_unit_freq.index)
rec_data = pd.DataFrame({"mean_freq": rec_mean_unit_freq, "start_time": start_times})
del rec_data.index.name
rec_data.reset_index()
rec_data['condition'] = rec_mean_unit_freq.index
mean_unit_freq = pd.concat([mean_unit_freq, rec_data])
for cond in mean_unit_freq['condition'].unique():
date_table = mean_unit_freq.query('condition == @cond')
plt.plot_date(date_table['start_time'], date_table['mean_freq'], '-o')
plt.yscale(yscale)
plt.xlabel('time')
plt.ylabel('mean spike frequency')
plt.title('Mean Spike Frequency Per Recording')
plt.legend(mean_unit_freq['condition'].unique())
def plot_medians_per_rec(category_dataframe, rec_starts, rec_ends, num_rec, yscale='linear', **plot_kwargs):
"""
Plots the median firing of each condition per recording session
"""
median_unit_freq = pd.DataFrame()
for index in range(0,num_rec):
start1 = rec_starts[index]
end1 = rec_ends[index]
rec_table = category_dataframe.query('time >= @start1 and time <= @end1')
rec_median_unit_freq = rec_table.groupby('condition')['spike_freq'].median()
num_units = rec_median_unit_freq.count()
start_dt = datetime.strptime(start1, "%Y-%m-%d %H:%M:%S").date()
start_times = pd.Series([start1]*num_units, index = rec_median_unit_freq.index)
rec_data = pd.DataFrame({"median_freq": rec_median_unit_freq, "start_time": start_times})
del rec_data.index.name
rec_data.reset_index()
rec_data['condition'] = rec_median_unit_freq.index
median_unit_freq = pd.concat([median_unit_freq, rec_data])
for cond in median_unit_freq['condition'].unique():
date_table = median_unit_freq.query('condition == @cond')
plt.plot_date(date_table['start_time'], date_table['median_freq'], '-o')
plt.yscale(yscale)
plt.xlabel('time')
plt.ylabel('median spike frequency')
plt.title('Median Spike Frequency Per Recording')
plt.legend(median_unit_freq['condition'].unique())
def construct_categorized_dataframe(data_table, filter_dict, var_name = 'spike_freq'):
"""
Takes the data from the matlab csv generated by preprocessing and applies filters to column names
allowing for the categorization of data
data_table - pandas DataFrame - should be populated from the .csv file generated by the
"generate_frequency_table.m" matlab script
filter_dict - dictionary of the form {'condition_name': condition_filter}, where
condition_name is a string used to identify an experimental condition, and condition filter
is a function that returns True for the unit_names corresponding to the desired condition
"""
time_vector = data_table['time'].map(mc.datetime_str_to_datetime)
unit_table = data_table.drop('time', axis=1)
condition_dicts = (
{
'time': time_vector,
'condition': condition_name,
var_name: condition_column,
'unit_name': condition_column.name,
'well': mc.get_well_number(condition_column.name)
} for condition_name, condition_filter in filter_dict.iteritems()
for condition_column in filter_unit_columns(condition_filter, unit_table)
)
condition_tables = it.imap(pd.DataFrame, condition_dicts)
return pd.concat(condition_tables)
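# Hedged usage sketch (not from the original file): filter_dict maps condition names to
# predicates over unit/column names. The well split below is hypothetical; real unit names
# depend on the MEA export and the helpers in `mc`.
#
#     filters = {'control': lambda name: mc.get_well_number(name) <= 24,
#                'stim': lambda name: mc.get_well_number(name) > 24}
#     cat_table = construct_categorized_dataframe(data_table, filters)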
def construct_categorized_dataframe_burst(data_table, filter_dict):
"""
Takes the data from the matlab csv generated by preprocessing and applies filters to column names
allowing for the categorization of data
data_table - pandas DataFrame - should be populated from the .csv file generated by the
"generate_frequency_table.m" matlab script
filter_dict - dictionary of the form {'condition_name': condition_filter}, where
condition_name is a string used to identify an experimental condition, and condition filter
is a function that returns True for the unit_names corresponding to the desired condition
get_power - function that returns the power of optical stimulation by mapping the unit name to the
well map
get_width - function that returns the width of each pulse of optical stimulation by mapping the unit
name to the well map
"""
condition_table = pd.DataFrame()
for condition_name, condition_filter in filter_dict.iteritems():#iterates through each condition
filtered_table = filter_unit_rows(condition_filter, data_table)
#print(filtered_table)
filtered_table['condition'] = condition_name
condition_table = condition_table.append(filtered_table, ignore_index=True)
return condition_table
def filter_unit_columns(predicate, unit_table):
"""
Generates columns from unit_table whose names satisfy the condition specified in predicate
predicate - function that returns true for desired unit names
unit_table - data_mat containing firing rates over time from each unit, with the time column ommited
"""
unit_column_names = filter(predicate, unit_table.columns)
for column_name in unit_column_names:
yield unit_table[column_name]
def filter_unit_rows(predicate, data_table):
"""
Generates rows from unit_table whose times satisfy the condition specified in predicate
predicate - function that returns true for desired unit names
data_table - data_mat containing data over time from each unit, with the time column included
"""
data_row_units = filter(predicate, data_table['unit_name'].unique())
selected_unit_table = pd.DataFrame()
for row_unit in data_row_units:
selected_unit_table = pd.concat([selected_unit_table, data_table[data_table['unit_name'] == row_unit]])
return selected_unit_table
def smooth_categorized_dataframe_unit_traces(category_dataframe, kernel_size=5):
cat_df_copy = category_dataframe.copy()
for unit_name in cat_df_copy['unit_name'].unique():
unit_table = cat_df_copy.query('unit_name == @unit_name')
smooth_trace = smooth(unit_table['spike_freq'], kernel_size=kernel_size, mode='same')
cat_df_copy.loc[cat_df_copy['unit_name'] == unit_name, 'spike_freq'] = smooth_trace
return cat_df_copy
def makeTables(b_start, b_stop, s_start, e_start, cat_table):
'''
Makes tables of the baseline portion, stimulated portion and the end portion (i.e. the part of the time course that you deem to have adapted) from the table of the whole time course
'''
baseline_table = cat_table.query('time < "%s"'%b_stop).query('time > "%s"'%b_start)
stim_table = cat_table.query('time > "%s"'%s_start)
end_table = cat_table.query('time > "%s"'%e_start)
return(baseline_table, stim_table, end_table)
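# Hedged example (timestamps are hypothetical): split a time course into baseline,
# stimulated and adapted-end windows before computing fold induction.
#
#     baseline_table, stim_table, end_table = makeTables(
#         '2017-01-01 00:00:00', '2017-01-02 00:00:00',   # baseline start / stop
#         '2017-01-02 00:30:00',                          # stimulation start
#         '2017-01-04 00:00:00',                          # adapted "end" start
#         cat_table)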
def get_mean_med_traces(c_filter, data_col, b_filter, FR_gradient):
mean_freq_traces = c_filter.groupby(('condition', 'time'))[data_col].mean()
mean_freq_traces = mean_freq_traces.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
mean_freq_traces_b = b_filter.groupby(('condition', 'time'))[data_col].mean()
mean_freq_traces_b = mean_freq_traces_b.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
median_freq_traces = c_filter.groupby(('condition', 'time'))[data_col].median()
median_freq_traces = median_freq_traces.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
median_freq_traces_b = b_filter.groupby(('condition', 'time'))[data_col].median()
median_freq_traces_b = median_freq_traces_b.rename(data_col).reset_index() # Convert the multiindexed series back to a dataframe
if FR_gradient == True:
b_mean_freq = b_filter.groupby(('unit_name'))['spike_freq'].mean()
b_mean_freq = b_mean_freq.rename('spike_freq')#.reset_index()
u_color = (np.log10(b_mean_freq)+3)/3
else:
b_mean_freq = b_filter.groupby(('unit_name'))['spike_freq'].mean()
b_mean_freq = b_mean_freq.rename('spike_freq')#.reset_index()
u_color = np.random.random_sample()*b_mean_freq
return(mean_freq_traces, mean_freq_traces_b, median_freq_traces, median_freq_traces_b, u_color)
def make_fold_plot(c_filter, t_start, u_color, FR_gradient, plotFolds, norm_by_median, norm_by_mean, mean_freq_traces_b, median_freq_traces_b, mean_freq_traces, median_freq_traces, y_scale, data_col, data_col_mm, title, ymax):
plt.xlabel('Time (days)')
plt.ylim(0.00005,ymax)
for unit_name in c_filter['unit_name'].unique():
unit = c_filter.query('unit_name == @unit_name')
u_time = unit['time']
time_vector_u = u_time-t_start
time_vector_u = time_vector_u.map(lambda x: x.total_seconds()/86400.0)
this_color = u_color[unit_name]
if FR_gradient == True:
if plotFolds == True:
if norm_by_median.empty == False:
plt.plot(time_vector_u, np.divide(unit['folds'], norm_by_mean), color=plt.cm.gnuplot2(this_color, .4))
else:
plt.plot(time_vector_u, unit['folds'], color=plt.cm.gnuplot2(this_color, .4))
else:
plt.plot(time_vector_u, unit[data_col], color=plt.cm.gnuplot2(this_color, .4))
#color_ind = color_ind+1
else:
if plotFolds == True:
if norm_by_median.empty == False:
plt.plot(time_vector_u, np.divide(unit['folds'], norm_by_mean), color=(random.random(), random.random(), random.random(), .4))
else:
plt.plot(time_vector_u, unit['folds'], color=(random.random(), random.random(), random.random(), .4))
else:
plt.plot(time_vector_u, unit[data_col], color=(random.random(), random.random(), random.random(), .4))
#print(mean_freq_traces_b)
meanOfMean = np.mean(mean_freq_traces_b[data_col_mm])
meanOfMedian = np.mean(median_freq_traces_b[data_col_mm])
m_time = mean_freq_traces['time']
time_vector_m = m_time-t_start
time_vector_m = time_vector_m.map(lambda x: x.total_seconds()/86400.0)
if plotFolds == True:
plt.axhline(y=1, xmin=0, xmax=1, hold=None, color='black')
if norm_by_mean.empty == False:
plt.plot(time_vector_m, np.divide(mean_freq_traces[data_col]/meanOfMean,norm_by_mean), color=(0,0,0))
plt.plot(time_vector_m, np.divide(median_freq_traces[data_col]/meanOfMedian,norm_by_median), 'r')
else:
plt.plot(time_vector_m, mean_freq_traces[data_col_mm]/meanOfMean, color=(0,0,0))
plt.plot(time_vector_m, median_freq_traces[data_col_mm]/meanOfMedian, 'r')
plt.ylabel('Fold Induction of Spike Frequency (Hz)')
else:
plt.axhline(y=meanOfMean, xmin=0, xmax=1, hold=None, color='black')
plt.plot(time_vector_m, mean_freq_traces[data_col], color=(0,0,0))
plt.plot(time_vector_m, median_freq_traces[data_col], 'r')
plt.ylabel('Spike Frequency (Hz)')
plt.yscale(y_scale)
plt.title(title)
plt.show()
return(meanOfMean, meanOfMedian, time_vector_m)
def foldInductionPlusMean_stim(cat_table, baseline_table, stim_table, condition, title, var, minHz, maxHz, ymax, plotFolds, foldMin, y_scale, filter_wells, data_col, data_col_mm, plot_group, FR_gradient, norm_by_mean, norm_by_median, plot_wells):
'''
This function plots baseline-normalized plots for a given condition that include both all of the channels passing filters and the mean(black)+median(red) of those channels--use for stimulated samples b/c filters out things that don't change with stim
'''
c = cat_table.query('condition == "%s"'%condition)
b = baseline_table.query('condition == "%s"'%condition)
s = stim_table.query('condition == "%s"'%condition)
t_start = min(s['time'])
c_filter, b_filter, count_real, count_live, cf = psupp.filter_neurons_homeostasis(c, b, s, ind_filter=True, var=var, minHz=minHz, maxHz=maxHz, foldMin=foldMin, filter_wells=filter_wells, data_col=data_col)
if c_filter.empty:
print "No valid units for condition",condition
print('respond to drug: 0')
print('stay alive: ' + str(count_live))
print('real: ' + str(count_real))
print('condition: ' + str(len(c['unit_name'].unique())))
        return ([], 0, 0, 0)  # keep the four-value shape unpacked by foldInductionPlusMean
if plot_group != 0:
c_filter, b_filter = psupp.select_homeo_units(plot_group, c_filter, b_filter)
mean_freq_traces, mean_freq_traces_b, median_freq_traces, median_freq_traces_b, u_color = get_mean_med_traces(c_filter, data_col_mm, b_filter, FR_gradient)
meanOfMean, meanOfMedian, time_vector_m = make_fold_plot(c_filter, t_start, u_color, FR_gradient, plotFolds, norm_by_median, norm_by_mean, mean_freq_traces_b, median_freq_traces_b, mean_freq_traces, median_freq_traces, y_scale, data_col, data_col_mm, title, ymax)
#plot individual well plots
if plot_wells == True:
for w in c_filter['well'].unique():
plt.figure()
well_c = c_filter.query('well == @w')
well_b = b_filter.query('well == @w')
well_mft, well_mftb, well_mdft, well_mdftb, well_color = get_mean_med_traces(well_c, data_col_mm, well_b, FR_gradient)
well_title = 'Well ' + str(w)
make_fold_plot(well_c, t_start, well_color, FR_gradient, plotFolds, norm_by_median, norm_by_mean, well_mftb, well_mdftb, well_mft, well_mdft, y_scale, data_col, data_col_mm, well_title, ymax)
print('respond to drug: ' + str(len(c_filter['unit_name'].unique())))
print('stay alive: ' + str(count_live))
print('real: ' + str(count_real))
print('condition: ' + str(len(c['unit_name'].unique())))
return (c_filter['unit_name'].unique(), mean_freq_traces[data_col_mm]/meanOfMean, median_freq_traces[data_col_mm]/meanOfMedian, time_vector_m)
def foldInductionPlusMean_ctrl(cat_table, baseline_table, stim_table, condition, title, var, minHz, maxHz, ymax, plotFolds, foldMin, y_scale, filter_wells, data_col, data_col_mm, plot_group, FR_gradient, norm_by_mean, norm_by_median, plot_wells):
'''
This function plots baseline-normalized plots for a given condition that include both all of the channels passing filters and the mean(black)+median(red) of those channels--use for unstim samples
'''
c = cat_table.query('condition == "%s"'%condition)
b = baseline_table.query('condition == "%s"'%condition)
s = stim_table.query('condition == "%s"'%condition)
t_start = min(s['time'])
c_filter, b_filter, count_real, count_live, count_final = psupp.filter_neurons_homeostasis(c, b, s, ind_filter=False, var=var, minHz=minHz, maxHz=maxHz, foldMin=foldMin, filter_wells=False, data_col = data_col)
if c_filter.empty:
print "No valid units for condition",condition
print('stay alive: ' + str(count_live))
print('real: ' + str(count_real))
print('condition: ' + str(len(c['unit_name'].unique())))
        return ([], 0, 0, 0)  # match the four values unpacked by foldInductionPlusMean
# select to show only neurons that do homeostase or don't
if plot_group != 0:
c_filter, b_filter = psupp.select_homeo_units(plot_group, c_filter, b_filter)
mean_freq_traces, mean_freq_traces_b, median_freq_traces, median_freq_traces_b, u_color = get_mean_med_traces(c_filter, data_col_mm, b_filter, FR_gradient)
meanOfMean, meanOfMedian, time_vector_m = make_fold_plot(c_filter, t_start, u_color, FR_gradient, plotFolds, norm_by_median, norm_by_mean, mean_freq_traces_b, median_freq_traces_b, mean_freq_traces, median_freq_traces, y_scale, data_col, data_col_mm, title, ymax)
#plot individual well plots
if plot_wells == True:
for w in c_filter['well'].unique():
plt.figure()
well_c = c_filter.query('well == @w')
well_b = b_filter.query('well == @w')
well_mft, well_mftb, well_mdft, well_mdftb, well_color = get_mean_med_traces(well_c, data_col_mm, well_b, FR_gradient)
well_title = 'Well ' + str(w)
make_fold_plot(well_c, t_start, well_color, FR_gradient, plotFolds, norm_by_median, norm_by_mean, well_mftb, well_mdftb, well_mft, well_mdft, y_scale, data_col, data_col_mm, well_title, ymax)
print('stay alive: ' + str(count_live))
print('real: ' + str(count_real))
print('condition: ' + str(len(c['unit_name'].unique())))
plt.show()
return (c_filter['unit_name'].unique(), mean_freq_traces[data_col_mm]/meanOfMean, median_freq_traces[data_col_mm]/meanOfMedian, time_vector_m)
def foldInductionPlusMean(cat_table, drug_time, condition, title, var=10, minHz = 0.001, maxHz = 100, ind_filter = True, ymax = 10, plotFolds = True, foldMin = 0.001, y_scale = 'linear', filter_wells = False, data_col ='spike_freq', data_col_mm = 'folds', plot_group = 0, FR_gradient = True, norm_by_mean = pd.Series([]), norm_by_median = pd.Series([]), plot_wells=True):
'''
Combine stim and ctrl fxns
'''
mean = False
median = False
baseline_table = cat_table.query('time < @drug_time')
stim_table = cat_table.query('time >= @drug_time')
if ind_filter:
filtered_units, mean, median, time_vector = foldInductionPlusMean_stim(cat_table, baseline_table, stim_table, condition, title, var, minHz, maxHz, ymax, plotFolds, foldMin, y_scale, filter_wells, data_col, data_col_mm, plot_group, FR_gradient, norm_by_mean, norm_by_median, plot_wells)
else:
filtered_units, mean, median, time_vector = foldInductionPlusMean_ctrl(cat_table, baseline_table, stim_table, condition, title, var, minHz, maxHz, ymax, plotFolds, foldMin, y_scale, filter_wells, data_col, data_col_mm, plot_group, FR_gradient, norm_by_mean, norm_by_median, plot_wells)
return filtered_units, mean, median, time_vector
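# Hedged usage sketch: `drug_time` marks when the drug/stimulation was applied and splits
# cat_table into baseline and stimulated halves; the condition string must match one used
# when the categorized dataframe was built. Values below are illustrative only.
#
#     units, mean_fold, median_fold, t = foldInductionPlusMean(
#         cat_table, drug_time='2017-01-02 00:30:00', condition='stim',
#         title='Stim fold induction', ind_filter=True)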
def count_active_neurons(cat_table, baseline_table = 0, stim_table = 0, threshold = 0.001, folds = 0, kill_neurons = 0, return_value = 0):
'''
Count and plot the number of neurons firing above a threshold at each time point. If folds == 1, a neuron is deemed firing
if its fold induction is greater than threshold. If kill_neurons == 1, a neuron is deemed dead for the rest of the
experiment as soon as its fold induction goes below threshold.
'''
time_days = (cat_table['time']-cat_table['time'].iloc[0]).map(lambda x: x.days)
time_seconds = (cat_table['time']-cat_table['time'].iloc[0]).map(lambda x: x.seconds)
time_vector = (time_days + (time_seconds/3600/24)).unique()
if folds == 0:
count_table = cat_table
elif folds == 1:
meanOfBaseline = baseline_table.groupby('unit_name')['spike_freq'].mean()
meanOfBaseline = meanOfBaseline.reset_index()
count_table = pd.DataFrame()
for unit in baseline_table['unit_name'].unique():
unit_table = cat_table.query('unit_name == @unit')
unit_mean_b = meanOfBaseline.query('unit_name == @unit')['spike_freq']
unit_mean_b = unit_mean_b.reset_index()
b = unit_mean_b.get_value(0,'spike_freq')
unit_table.loc[:,'spike_freq'] = unit_table['spike_freq']/b
below_fold_thresh = unit_table.query('spike_freq < @threshold')
if not below_fold_thresh.empty:
first_death = min(below_fold_thresh['time'])
unit_table.set_value((unit_table.loc[unit_table['time'] > first_death]).index, 'spike_freq', 0)
ccc=unit_table
count_table = pd.concat([count_table, unit_table])
above_threshold = count_table.query('spike_freq > @threshold')
time_grouped_counts = above_threshold.groupby(('time'))['unit_name'].count()
time_grouped_counts = time_grouped_counts.rename('count').reset_index() # Convert the multiindexed series back to a dataframe
plt.plot(time_vector, time_grouped_counts['count'])
plt.xlabel('time')
plt.ylabel('Number of active units')
plt.title(cat_table.get_value(0,'condition')[0])
if return_value:
return time_grouped_counts
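# Hedged example: count units whose raw rate exceeds 0.1 Hz at each time point; with
# folds=1 (and a baseline_table from makeTables above) a unit instead counts while it
# stays above threshold-fold of its own baseline mean. Arguments shown are illustrative.
#
#     counts = count_active_neurons(cat_table, baseline_table=baseline_table,
#                                   threshold=0.1, folds=0, return_value=1)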
def compare_active_per_recording(cat_table, threshold, rec_starts, rec_ends, num_rec):
'''
For each recording session, find the number of new neurons and the
number of neurons that have stopped firing
'''
above_threshold = cat_table.query('spike_freq > @threshold')
only_1 = [0]*(num_rec-1);
only_2 = [0]*(num_rec-1);
for index in range(0,num_rec-1):
start1 = rec_starts[index]
end1 = rec_ends[index]
start2 = rec_starts[index+1]
end2 = rec_ends[index+1]
group_1 = above_threshold.query('time >= @start1 and time <= @end1')
group_2 = above_threshold.query('time >= @start2 and time <= @end2')
units_1 = group_1['unit_name'].unique()
units_2 = group_2['unit_name'].unique()
both = list(set(units_1) | set(units_2))
only_1[index] = len(both) - len(units_2) #Count the number of units in group1 but not group2
only_2[index] = len(both) - len(units_1)
rec_starts_series = pd.Series(rec_starts)
recs = rec_starts_series.map(mc.remapped_str_to_datetime)
plt.plot_date(recs[1:num_rec], only_2, '-', label = "new")
plt.plot_date(recs[1:num_rec], only_1, '-', label = "died")
plt.legend()
plt.xlabel('Recording session')
plt.ylabel('Number of units')
plt.title('Neuron turnover')
def compare_active_per_sec(cat_table, threshold):
'''For each recording session, find the number of new neurons and the
number of neurons that have stopped firing
'''
above_threshold = cat_table.query('spike_freq > @threshold').reset_index()
secs = above_threshold['time']
num_sec = len(secs)
only_1 = [0]*(num_sec-1);
only_2 = [0]*(num_sec-1);
for index in range(0,num_sec-1):
start1 = secs.iloc[index]
start2 = secs.iloc[index+1]
group_1 = above_threshold.query('time == @start1')
group_2 = above_threshold.query('time == @start2')
units_1 = group_1['unit_name'].unique()
units_2 = group_2['unit_name'].unique()
both = list(set(units_1) | set(units_2))
only_1[index] = len(both) - len(units_2) #Count the number of units in group1 but not group2
only_2[index] = len(both) - len(units_1)
plt.plot_date(secs.iloc[0:num_sec-1], only_2, '-', label = "new")
plt.plot_date(secs.iloc[0:num_sec-1], only_1, '-', label = "died")
plt.legend()
plt.xlabel('Recording session')
plt.ylabel('Number of units')
plt.title('Neuron turnover')
def unit_mean_freq_hist(category_dataframe, num_bins = 50, plot = 'linear', title = 'Mean Firing Rate Per Unit'):
'''
Plots histogram showing the distribution of mean firing rate of each unit in category_dataframe
'''
unit_freq_mean = category_dataframe.groupby(('unit_name'))['spike_freq'].mean()
unit_freq_mean = unit_freq_mean.rename('spike_freq').reset_index() # Convert the multiindexed series back to a dataframe
unit_freq_mean = unit_freq_mean.query('spike_freq > 0')
sigma = unit_freq_mean['spike_freq'].std()
mu = unit_freq_mean['spike_freq'].mean()
if plot == 'linear':
n, bins, patches = plt.hist(unit_freq_mean['spike_freq'], bins = num_bins)
elif plot == 'log':
n, bins, patches = plt.hist(np.log10(unit_freq_mean['spike_freq']), bins = num_bins)
# add a 'best fit' line
# y = mlab.normpdf(bins, mu, sigma)
#plt.plot(bins, y, 'r--')
#plt.axvline(mu, color ='r')
plt.title(title)
def unit_mean_freq_hist_compare_cond(category_dataframe, num_bins = 50, plot = 'linear'):
'''
Plots histogram showing the distribution of mean firing rate of each unit in category_dataframe
'''
for cond in category_dataframe['condition'].unique():
cond_table = category_dataframe.query('condition == @cond')
unit_freq_mean = cond_table.groupby(('unit_name'))['spike_freq'].mean()
unit_freq_mean = unit_freq_mean.rename('spike_freq').reset_index()
unit_freq_mean = unit_freq_mean.query('spike_freq > 0')
sigma = unit_freq_mean['spike_freq'].std()
mu = unit_freq_mean['spike_freq'].mean()
if plot == 'linear':
n, bins, patches = plt.hist(unit_freq_mean['spike_freq'], bins = num_bins)
elif plot == 'log':
n, bins, patches = plt.hist(np.log10(unit_freq_mean['spike_freq']), bins = num_bins)
# add a 'best fit' line
y = mlab.normpdf(bins, mu, sigma)
plt.plot(bins, y, 'r--')
plt.axvline(mu, color ='r')
plt.title('Mean Firing Rate per Unit')
def unit_mean_freq_bar(category_dataframe):
'''
Plots histogram showing the distribution of mean firing rate of each unit in category_dataframe
'''
num_cond = len(category_dataframe['condition'].unique())
i=0
for cond in category_dataframe['condition'].unique():
cond_table = category_dataframe.query('condition == @cond')
unit_freq_mean = cond_table.groupby(('unit_name'))['spike_freq'].mean()
unit_freq_mean = unit_freq_mean.rename('spike_freq').reset_index() # Convert the multiindexed series back to a dataframe
unit_freq_mean = unit_freq_mean.query('spike_freq > 0')
sigma = unit_freq_mean['spike_freq'].std()
mu = unit_freq_mean['spike_freq'].mean()
plt.bar(i,mu, yerr=sigma)
i=i+1
    plt.xticks(range(num_cond), category_dataframe['condition'].unique())
plt.title('Mean Firing Rate per Unit')
plt.ylabel('Firing Rate (spk/s)')
def neurons_per_well(cat_table):
'''
Plots a bar plot of the number of active neurons per well
'''
units = cat_table['unit_name'].unique()
wells = np.zeros(48)
ind = range(1,49)
for unit in units:
well = mc.get_well_number(unit)
wells[well-1] = wells[well-1]+1
barlist = plt.bar(ind,wells)
for i in range(0,6):
barlist[i].set_color('r')
for i in range(6,12):
barlist[i].set_color('b')
for i in range(12,18):
barlist[i].set_color('g')
for i in range(18,24):
barlist[i].set_color('y')
for i in range(24,30):
barlist[i].set_color('c')
for i in range(30,36):
barlist[i].set_color('k')
for i in range(36,42):
barlist[i].set_color('m')
plt.title('Neurons per Well')
plt.xlabel('Well Number')
plt.ylabel('Number of Neurons')
def neurons_per_electrode(cat_table):
'''
Plots a bar plot of the number of active neurons per well
'''
units = cat_table['unit_name'].unique()
eles = np.zeros(768)
ind = range(1,769)
for unit in units:
ele = mc.get_electrode_number(unit)
eles[ele-1] = eles[ele-1]+1
barlist = plt.bar(ind,eles)
for i in range(0,96):
barlist[i].set_color('r')
for i in range(96,192):
barlist[i].set_color('b')
for i in range(192,288):
barlist[i].set_color('g')
for i in range(288,384):
barlist[i].set_color('y')
for i in range(384,480):
barlist[i].set_color('c')
for i in range(480,576):
barlist[i].set_color('k')
for i in range(576,672):
barlist[i].set_color('m')
plt.title('Neurons per Electrode')
plt.xlabel('Electrode Number')
plt.ylabel('Number of Neurons')
def heatmap_active_wells(unit_names):
'''
Makes a heatmap comparing the number of active neurons per well
'''
wells = np.zeros((6,8))
for unit in unit_names:
row, col = mc.get_row_col_number_tuple(unit)
wells[row-1,col-1] = wells[row-1,col-1]+1
plt.imshow(wells)
plt.colorbar()
def heatmap_active_electrodes(unit_names):
'''
Makes a heatmap comparing the number of active neurons per electrode
'''
electrodes = np.zeros((24, 32))
for unit in unit_names:
well_row, well_col = mc.get_row_col_number_tuple(unit)
ele = mc.get_electrode_number(unit)
row_in_well, col_in_well = mc.get_ele_row_col_number_tuple(unit)
ele_row = 4*(well_row-1) + row_in_well
ele_col = 4*(well_col-1) + col_in_well
ele_row = int(ele_row)
electrodes[ele_row-1, ele_col-1] = electrodes[ele_row-1, ele_col-1]+1
plt.imshow(electrodes)
plt.colorbar()
for xline in np.arange(-0.5,32,4):
plt.axvline(x = xline)
for yline in np.arange(-0.5,24,4):
plt.axhline(y=yline)
def cdf_foldInduction(b_filter, s_filter, title = ""):
'''
Plots the cumulative distribution function of the fold induction post baseline at various
timepoints during a homeostasis experiment
'''
s_start = s_filter['time'].iloc[0]
hours = np.array([0, 1, 3, 6, 12, 24, 36, 48,])# 72, 96, 120, 144, 168, 192])
max_hours = (max(s_filter['time']) - s_start).days*24 + (max(s_filter['time']) - s_start).seconds/3600
hours = hours[hours<= max_hours]
color_idx = np.linspace(0.1, 1, 1+len(hours))
stim_color_ind = 1
baseline_fold = b_filter.groupby(('unit_name'))['folds'].mean()
baseline_fold = baseline_fold.rename('folds').reset_index()
sort, p = psupp.cdf(baseline_fold['folds'])
plt.plot(sort, p, color='r')#plt.cm.gist_yarg(0.07))
for timepoint in hours:
period_start = s_start + timedelta(hours=timepoint)
period_stop = period_start + timedelta(hours=1)
s_period = s_filter.query('time > @period_start and time < @period_stop')
period_fold = s_period.groupby('unit_name')['folds'].mean()
period_fold = period_fold.rename('folds').reset_index()
sort, p = psupp.cdf(period_fold['folds'])
plt.plot(sort, p, color=plt.cm.gist_yarg(color_idx[stim_color_ind]))
stim_color_ind = stim_color_ind+1
legend_labels = np.append("Baseline", hours)
plt.legend(legend_labels)
plt.xlabel('FR/baseline')
plt.ylabel('Fraction of Population')
plt.xscale('log')
plt.title('CDF ' + title)
plt.xlim([0, 10])
plt.axhline(y=0.5, linestyle='--', color = 'k')
def hist_end_vs_start(units, baseline_stop, cat_table, cond, end_time = 0, nbins=100):
'''
Plots a histogram of the firing rate (of each unit in units) during the last hour divided by the hour before drug was added.
'''
ratio_table = psupp.calc_end_vs_start(units, baseline_stop, cat_table, end_time)
hist_val = ratio_table['ratio']
plt.hist(hist_val, nbins)#, bins=np.logspace(np.log10(0.0001),np.log10(1000), nbins))
plt.xscale('linear')
plt.axvline(1, color='k')
plt.ylabel('Frequency')
plt.xlabel('FR end / FR start')
plt.title(cond)
def scatter_homeo_vs_baseline(units, baseline_stop, cat_table, baseline_table, cond, end_time = 0, nbins=100):
'''
Makes a scatter plot of the end/start ratio on the y-axis, and the average baseline FR on the x-axis, for the units in 'units'.
'''
# Calculate end/start ratio
ratio_table = psupp.calc_end_vs_start(units, baseline_stop, cat_table, end_time)
ratio_table = ratio_table.sort_values(by = 'unit_name')
# Calculate mean FR
baseline_table = baseline_table.loc[baseline_table['unit_name'].isin(units)]
unit_freq_mean = baseline_table.groupby(('unit_name'))['spike_freq'].mean()
unit_freq_mean = unit_freq_mean.rename('spike_freq').reset_index() # Convert the multiindexed series back to a dataframe
unit_freq_mean = unit_freq_mean.sort_values(by = 'unit_name')
#joined = unit_freq_mean.set_index('unit_name').join(ratio_table.set_index('unit_name'), on = 'unit_name')
#caller.join(other.set_index('key'), on='key')
plt.scatter(unit_freq_mean['spike_freq'], ratio_table['ratio'])
plt.xlabel('Mean Baseline FR (Hz)')
plt.ylabel('End/Start')
plt.title(cond)
#return (ratio_table, unit_freq_mean)#, joined)
|
ktyssowski/mea_analysis
|
pymea/plotting.py
|
Python
|
mit
| 45,754
|
[
"NEURON"
] |
1e30a1401156711bddb52cebdf988656e5c45e92bf4f5bee006748998e6e2488
|
# coding=utf-8
import ast
class RewriteAddToSub(ast.NodeTransformer):
def visit_Add(self, node):
node = ast.Sub()
return node
node = ast.parse('2 + 6', mode='eval')
node = RewriteAddToSub().visit(node)
print eval(compile(node, '<string>', 'eval'))
|
dongweiming/web_develop
|
chapter15/section2/ast_transformer.py
|
Python
|
gpl-3.0
| 273
|
[
"VisIt"
] |
f3bc8ab077a95508a87db271ab79ba4cd86107b89faa11439a2417c8f31b79ef
|
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for mutual information estimators and helper functions."""
import numpy as np
import scipy
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import test_util as tfp_test_util
mi = tfp.vi.mutual_information
tfd = tfp.distributions
LOWER_BOUND_MIN_GAP = 0.3
LOWER_BOUND_MAX_GAP = 0.1
class MutualInformationTest(tfp_test_util.TestCase):
def setUp(self):
super(MutualInformationTest, self).setUp()
self.seed = tfp_test_util.test_seed()
self.scores = tfp_test_util.test_np_rng().normal(
loc=1.0,
scale=2.0,
size=[13, 17])
batch_size = 1000
rho = 0.8
dim = 2
x, eps = tf.split(value=tf.random.normal(shape=(2*batch_size, dim),
seed=self.seed),
num_or_size_splits=2, axis=0)
mean = rho * x
stddev = tf.sqrt(1. - tf.square(rho))
y = mean + stddev * eps
conditional_dist = tfd.MultivariateNormalDiag(
mean, scale_identity_multiplier=stddev)
marginal_dist = tfd.MultivariateNormalDiag(tf.zeros(dim), tf.ones(dim))
    # The conditional_scores has shape [y_batch_dim, distribution_batch_dim]
# as the `lower_bound_info_nce` requires `scores[i, j] = f(x[i], y[j])
# = log p(x[i] | y[j])`.
self.conditional_scores = conditional_dist.log_prob(y[:, tf.newaxis, :])
self.marginal_scores = marginal_dist.log_prob(y)[:, tf.newaxis]
self.optimal_critic = 1 + self.conditional_scores - self.marginal_scores
self.theoretical_mi = np.float32(-0.5 * np.log(1. - rho**2) * dim)
# Y is N-D standard normal distributed.
self.differential_entropy_y = 0.5 * np.log(2 * np.pi * np.e) * dim
def test_check_and_get_mask(self):
test_scores = tf.ones([2, 3])
positive_mask = np.eye(N=2, M=3, dtype=bool)
# create default masks
r_scores, r_pos_mask = mi._check_and_get_mask(test_scores)
self.assertEqual(r_scores.shape, [2, 3])
self.assertAllEqual(self.evaluate(r_pos_mask), positive_mask)
def test_get_masked_scores(self):
scores = np.array([[2., 5., -1e-3],
[-1073., 4.2, -4.]]).astype(np.float32)
mask = scores < 3.
target_res = np.array([[2., -np.inf, -1e-3],
[-1073., -np.inf, -4.]]).astype(np.float32)
func_res = mi._get_masked_scores(scores, mask)
self.assertAllEqual(self.evaluate(func_res), target_res)
def test_masked_logmeanexp(self):
# test1: compare against numpy/scipy implementation.
masked_scores = self.scores
num_masked_ele = np.sum(masked_scores > 0.)
masked_scores[masked_scores <= 0.] = -np.inf
numpy_impl = np.float32(
scipy.special.logsumexp(masked_scores) - np.log(num_masked_ele))
result_0d = mi._masked_logmeanexp(self.scores, self.scores > 0, axis=None)
self.assertAllClose(self.evaluate(result_0d), numpy_impl)
# test2: test against results from composition of numpy functions.
scores_2 = np.array([[2., 5., -1e-3],
[-1073., 4.2, -4.]], dtype=np.float32)
result_empty_sum = mi._masked_logmeanexp(
scores_2, scores_2 < 0., axis=None)
numpy_result = np.log(np.mean(np.exp(scores_2[scores_2 < 0.])))
self.assertAllClose(self.evaluate(result_empty_sum),
numpy_result.astype(np.float32))
# test3: whether `axis` arg works as expected.
result_1d = mi._masked_logmeanexp(self.scores, self.scores > 0, axis=[1,])
self.assertEqual(result_1d.shape, [13,])
def test_lower_bound_barber_agakov(self):
# Test1: against numpy reimplementation
test_scores = tf.random.normal(shape=[100,], stddev=5.)
test_entropy = tf.random.normal(shape=[], stddev=10.)
impl_estimation, test_scores, test_entropy = self.evaluate(
[mi.lower_bound_barber_agakov(logu=test_scores, entropy=test_entropy),
test_scores, test_entropy])
numpy_estimation = np.mean(test_scores) + test_entropy
self.assertAllClose(impl_estimation, numpy_estimation)
# Test2: batched input
test_scores_2 = tf.random.normal(shape=[13, 5], stddev=5.)
test_entropy_2 = tf.random.normal(shape=[13,], stddev=10.)
impl_estimation_2, test_scores_2, test_entropy_2 = self.evaluate(
[mi.lower_bound_barber_agakov(
logu=test_scores_2, entropy=test_entropy_2),
test_scores_2, test_entropy_2])
numpy_estimation_2 = np.mean(test_scores_2, axis=-1) + test_entropy_2
self.assertAllClose(impl_estimation_2, numpy_estimation_2)
# Test3: test example, since the estimation is a lower bound, we test
# by range.
impl_estimation_3 = self.evaluate(
mi.lower_bound_barber_agakov(
logu=tf.linalg.diag_part(self.conditional_scores),
entropy=self.differential_entropy_y))
self.assertAllInRange(
impl_estimation_3,
self.theoretical_mi-LOWER_BOUND_MIN_GAP,
self.theoretical_mi+LOWER_BOUND_MAX_GAP)
def test_lower_bound_info_nce(self):
# Numerical test with correlated gaussian as random variables.
info_nce_bound = self.evaluate(
mi.lower_bound_info_nce(self.conditional_scores))
self.assertAllInRange(
info_nce_bound,
lower_bound=self.theoretical_mi-LOWER_BOUND_MIN_GAP,
upper_bound=self.theoretical_mi+LOWER_BOUND_MAX_GAP)
    # Check the masked against the non-masked version
info_nce_bound_1 = self.evaluate(
mi.lower_bound_info_nce(self.scores))
positive_mask = np.eye(self.scores.shape[0], self.scores.shape[1])
info_nce_bound_2 = self.evaluate(
mi.lower_bound_info_nce(self.scores, positive_mask, validate_args=True))
self.assertAllClose(info_nce_bound_1, info_nce_bound_2)
    # Check the batched against the non-batched version
info_nce_bound_3 = self.evaluate(
mi.lower_bound_info_nce(tf.tile(self.scores[None, :, :], [3, 1, 1])))
self.assertAllClose(
info_nce_bound_3,
self.evaluate(tf.tile(info_nce_bound_1[tf.newaxis,], [3])))
def test_lower_bound_jensen_shannon(self):
# Check against numpy implementation.
log_f = self.optimal_critic
js_bound, log_f = self.evaluate([mi.lower_bound_jensen_shannon(log_f),
log_f])
# The following numpy softplus is numerically stable when x is large
# log(1+exp(x)) = log(1+exp(x)) - log(exp(x)) + x = log(1+exp(-x)) + x
numpy_softplus = lambda x: np.log(1+np.exp(-np.abs(x))) + np.maximum(x, 0)
log_f_diag = np.diag(log_f)
n = np.float32(log_f.shape[0])
first_term = np.mean(-numpy_softplus(-log_f_diag))
second_term = (np.sum(numpy_softplus(log_f)) -
np.sum(numpy_softplus(log_f_diag))) / (n * (n - 1.))
numpy_implementation = first_term - second_term
self.assertAllClose(js_bound, numpy_implementation, rtol=1e-5)
    # Check the masked against the non-masked version
js_bound_1 = mi.lower_bound_jensen_shannon(self.scores)
positive_mask = np.eye(self.scores.shape[0], self.scores.shape[1])
js_bound_2 = self.evaluate(
mi.lower_bound_jensen_shannon(self.scores, positive_mask,
validate_args=True))
self.assertAllClose(js_bound_1, js_bound_2)
    # Check the batched against the non-batched version
js_bound_3 = self.evaluate(
mi.lower_bound_jensen_shannon(
tf.tile(self.scores[tf.newaxis, :, :], [3, 1, 1])))
self.assertAllClose(
js_bound_3, self.evaluate(tf.tile(js_bound_1[tf.newaxis,], [3])))
def test_lower_bound_nguyen_wainwright_jordan(self):
# Numerical test against theoretical values
nwj_bound = self.evaluate(
mi.lower_bound_nguyen_wainwright_jordan(self.optimal_critic))
self.assertAllInRange(
nwj_bound,
lower_bound=self.theoretical_mi-LOWER_BOUND_MIN_GAP,
upper_bound=self.theoretical_mi+LOWER_BOUND_MAX_GAP)
    # Check the masked against the non-masked version
nwj_bound_1 = mi.lower_bound_nguyen_wainwright_jordan(self.scores)
positive_mask = np.eye(self.scores.shape[0], self.scores.shape[1])
nwj_bound_2 = self.evaluate(
mi.lower_bound_nguyen_wainwright_jordan(
self.scores, positive_mask, validate_args=True))
self.assertAllClose(nwj_bound_1, nwj_bound_2)
    # Check the batched against the non-batched version
nwj_bound_3 = self.evaluate(
mi.lower_bound_nguyen_wainwright_jordan(
tf.tile(self.scores[tf.newaxis, :, :], [3, 1, 1])))
self.assertAllClose(
nwj_bound_3, self.evaluate(tf.tile(nwj_bound_1[None,], [3])))
if __name__ == '__main__':
tfp_test_util.main()
|
tensorflow/probability
|
tensorflow_probability/python/vi/mutual_information_test.py
|
Python
|
apache-2.0
| 9,306
|
[
"Gaussian"
] |
606a2d1cc6fe6ede71d23e10cc05e756e07fb971c5ba76b434ed2a1beb3dbb68
|
# Copyright 2008 Brian Boyer, Ryan Mark, Angela Nitzke, Joshua Pollock,
# Stuart Tiffen, Kayla Webley and the Medill School of Journalism, Northwestern
# University.
#
# This file is part of Crunchberry Pie.
#
# Crunchberry Pie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Crunchberry Pie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with Crunchberry Pie. If not, see <http://www.gnu.org/licenses/>.
from django import template
from django.conf import settings
from profiles.models import UserProfile
from authentication.models import FacebookTemplate
register = template.Library()
@register.inclusion_tag('facebook/js.html')
def show_facebook_js():
return {'facebook_api_key': settings.FACEBOOK_API_KEY}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_name(context,user):
if isinstance(user,UserProfile):
p = user
else:
p = user.get_profile()
if settings.WIDGET_MODE:
#if we're rendering widgets, link direct to facebook
return {'string':u'<a href="%s">%s</a>' % (p.profile_url,p.full_name)}
else:
return {'string':u'<a href="%s">%s</a>' % (p.get_absolute_url(),p.full_name)}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_first_name(context,user):
if isinstance(user,UserProfile):
p = user
else:
p = user.get_profile()
return {'string':u'<a href="%s">%s</a>' % (p.get_absolute_url(),p.first_name)}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_possesive(context,user):
if isinstance(user,UserProfile):
p = user
else:
p = user.get_profile()
return {'string':u'<fb:name uid="%i" possessive="true" linked="false"></fb:name>' % p.facebook_id}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_greeting(context,user):
if isinstance(user,UserProfile):
p = user
else:
p = user.get_profile()
return {'string':u'Hello, <a href="%s">%s</a>!' % (p.get_absolute_url(),p.first_name)}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_status(context,user):
if isinstance(user,UserProfile):
p = user
else:
p = user.get_profile()
return {'string':p.status}
@register.inclusion_tag('facebook/show_string.html',takes_context=True)
def show_facebook_photo(context,user):
if isinstance(user,UserProfile):
p = user
else:
p = user.get_profile()
if settings.WIDGET_MODE:
#if we're rendering widgets, link direct to facebook
return {'string':u'<a href="%s"><img src="%s" alt="%s"/></a>' % (p.profile_url, p.picture_url, p.full_name)}
else:
return {'string':u'<a href="%s"><img src="%s" alt="%s"/></a>' % (p.get_absolute_url(), p.picture_url, p.full_name)}
@register.inclusion_tag('facebook/display.html',takes_context=True)
def show_facebook_info(context,user):
if isinstance(user,UserProfile):
p = user
else:
p = user.get_profile()
return {'profile_url':p.get_absolute_url(), 'picture_url':p.picture_url, 'full_name':p.full_name,'networks':p.networks}
@register.inclusion_tag('facebook/feed_script.html',takes_context=True)
def show_feed_script(context,template_bundle_name):
template = FacebookTemplate.objects.get(name=template_bundle_name)
return {'template_bundle_id':template.template_bundle_id}
@register.inclusion_tag('facebook/mosaic.html')
def show_profile_mosaic(profiles):
return {'profiles':profiles}
@register.inclusion_tag('facebook/connect_button.html',takes_context=True)
def show_connect_button(context,javascript_friendly=False):
req = context['request']
if req.path.startswith('/accounts/login'):
#this happens if login_required decorator sent us to the login page
        next = req.GET.get('next', '')
elif 'next' in req.GET:
#logging in with the quips widget will do this
next = req.GET['next']
else:
next = context.get('next',req.path)
return {'next':next,'javascript_friendly':javascript_friendly}
|
brianboyer/newsmixer
|
pie/authentication/templatetags/facebook.py
|
Python
|
gpl-3.0
| 4,610
|
[
"Brian"
] |
90c4ee25d169fb1a5296c68e052858d23c9609bfefea92213d3093d952b1a307
|
'''
AST Rewrite Pass to join Else nodes
'''
from ..util.dispatch import method_store, multimethod
from .. import node
class JoinElse(object):
_store = method_store()
@multimethod(_store)
def visit(self, n):
pass
@visit.d(node.Block)
def _(self, n):
self.scan(n.exprs)
@visit.d(node.Module)
def _(self, n):
self.scan(n.exprs)
def scan(self, exprs):
'''
Scan a list of exprs, joining neighboring If and Else nodes into a single If node.
The 'hole' left by the Else is replaced with a NoOp.
'''
prev = None
for j in range(1, len(exprs)):
i = j - 1
prev = exprs[i]
curr = exprs[j]
if isinstance(prev, node.If):
if isinstance(curr, node.Else):
prev.else_body = curr.body
# exprs[i] = prev
exprs[j] = node.NoOp()
|
dacjames/mara-lang
|
bootstrap/mara/passes/join_else.py
|
Python
|
mit
| 946
|
[
"VisIt"
] |
fa832770e30fc01777d3dfc40de78d92e59a0bcaa059f9f67f7e7559cc9e501c
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# PyNNLess -- Yet Another PyNN Abstraction Layer
# Copyright (C) 2015 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Simple network consisting of 10 disconnected neurons and a spike source array.
"""
import sys
import common.setup # Common example code (checks command line parameters)
import common.params # Parameters for the models which work with all systems
import common.utils # Output functions
import pynnless as pynl
# Create a new pl instance with the given backend
backend = sys.argv[1]
sim = pynl.PyNNLess(backend)
# Create and run network with two populations: one consisting of a spike
# source array and another consisting of neurons.
print("Simulating network...")
count = 10
res = sim.run(pynl.Network()
.add_source(spike_times=[100.0 * i for i in xrange(1, 9)])
.add_population(
pynl.IfCondExpPopulation(
count=count,
params=common.params.IF_cond_exp)
.record_spikes()
)
.add_connections([((0, 0), (1, i), 0.024, 0.0) for i in xrange(count)]))
print("Done!")
# Write the spike times for each neuron to disk
print("Writing spike times to " + common.setup.outfile)
common.utils.write_spike_times(common.setup.outfile, res[1]["spikes"])
|
hbp-sanncs/pynnless
|
examples/multiple_neurons.py
|
Python
|
gpl-3.0
| 1,961
|
[
"NEURON"
] |
547cdd0afafdff26448f58b7fe6be15ed636ca55bb473e36e16c5fcc180a7d45
|
# Imports
import os
import h5py
import numpy as np
from collections import Counter, defaultdict, namedtuple
from gatktool import tool
# Keras Imports
import keras.backend as K
# Package Imports
from . import defines
from . import tensor_maps
READ_ELEMENTS = 8
Read = namedtuple("Read", "seq qual cigar reverse mate_reverse first mapping_quality reference_start")
Variant = namedtuple("Variant", "contig pos ref alt type")
CIGAR_CODES_TO_COUNT = [
defines.CIGAR_CODE['M'], defines.CIGAR_CODE['I'], defines.CIGAR_CODE['S'], defines.CIGAR_CODE['D']
]
p_lut = np.zeros((256,))
not_p_lut = np.zeros((256,))
for i in range(256):
exponent = float(-i) / 10.0
p_lut[i] = 1.0 - (10.0**exponent)
not_p_lut[i] = (1.0 - p_lut[i]) / 3.0
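# Worked example for intuition (not part of the original tool): a Phred-scaled base
# quality Q maps to the probability that the called base is correct,
# p = 1 - 10**(-Q/10), and the residual error mass is split evenly over the other
# three bases. E.g. Q = 20 gives p_lut[20] = 0.99 and not_p_lut[20] = 0.01 / 3 ~ 0.0033.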
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# ~~~~~~~ Inference ~~~~~~~~~~~~~~~
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def score_and_write_batch(args, model, file_out, batch_size, python_batch_size, tensor_dir):
    '''Score a batch of variants with a CNN model. Write tab delimited temp file with scores.
    This function is tightly coupled with CNNScoreVariants.java.
    It requires data written to the FIFO in the order given by transferToPythonViaFifo.
    Arguments
        args: Namespace with command line or configuration file set arguments
        model: a keras model
        file_out: the temporary file where variant scores are written
        batch_size: the total number of variants available in the FIFO
        python_batch_size: the number of variants to process in each inference
        tensor_dir: if this path exists, write hd5 files for each tensor (optional, for debugging)
    '''
annotation_batch = []
reference_batch = []
variant_types = []
variant_data = []
read_batch = []
for _ in range(batch_size):
fifo_line = tool.readDataFIFO()
fifo_data = fifo_line.split(defines.SEPARATOR_CHAR)
variant_data.append(fifo_data[0] + '\t' + fifo_data[1] + '\t' + fifo_data[2] + '\t' + fifo_data[3])
reference_batch.append(reference_string_to_tensor(fifo_data[4]))
annotation_batch.append(annotation_string_to_tensor(args, fifo_data[5]))
variant_types.append(fifo_data[6].strip())
fidx = 7 # 7 Because above we parsed: contig pos ref alt reference_string annotation variant_type
if args.tensor_name in defines.TENSOR_MAPS_2D and len(fifo_data) > fidx:
read_tuples = []
var = Variant(fifo_data[0], int(fifo_data[1]), fifo_data[2], fifo_data[3], fifo_data[6])
while fidx+7 < len(fifo_data):
read_tuples.append( Read(fifo_data[fidx],
list(map(int, fifo_data[fidx+1].split(','))),
fifo_data[fidx+2],
bool_from_java(fifo_data[fidx+3]),
bool_from_java(fifo_data[fidx+4]),
bool_from_java(fifo_data[fidx+5]),
int(fifo_data[fidx+6]),
int(fifo_data[fidx+7])))
fidx += READ_ELEMENTS
_, ref_start, _ = get_variant_window(args, var)
insert_dict = get_inserts(args, read_tuples, var)
tensor = read_tuples_to_read_tensor(args, read_tuples, ref_start, insert_dict)
reference_sequence_into_tensor(args, fifo_data[4], tensor, insert_dict)
if os.path.exists(tensor_dir):
_write_tensor_to_hd5(args, tensor, annotation_batch[-1], fifo_data[0], fifo_data[1], fifo_data[6])
read_batch.append(tensor)
if args.tensor_name in defines.TENSOR_MAPS_1D:
predictions = model.predict([np.array(reference_batch), np.array(annotation_batch)],
batch_size=python_batch_size)
elif args.tensor_name in defines.TENSOR_MAPS_2D:
predictions = model.predict(
{args.tensor_name:np.array(read_batch), args.annotation_set:np.array(annotation_batch)},
batch_size=python_batch_size)
else:
raise ValueError('Unknown tensor mapping. Check architecture file.', args.tensor_name)
indel_scores = predictions_to_indel_scores(predictions)
snp_scores = predictions_to_snp_scores(predictions)
for i in range(batch_size):
if 'SNP' == variant_types[i]:
file_out.write(variant_data[i]+'\t{0:.3f}'.format(snp_scores[i])+'\n')
elif 'INDEL' == variant_types[i]:
file_out.write(variant_data[i]+'\t{0:.3f}'.format(indel_scores[i])+'\n')
else:
file_out.write(variant_data[i]+'\t{0:.3f}'.format(max(snp_scores[i], indel_scores[i]))+'\n')
def reference_string_to_tensor(reference):
dna_data = np.zeros((len(reference), len(defines.DNA_SYMBOLS)))
for i,b in enumerate(reference):
if b in defines.DNA_SYMBOLS:
dna_data[i, defines.DNA_SYMBOLS[b]] = 1.0
elif b in defines.AMBIGUITY_CODES:
dna_data[i] = defines.AMBIGUITY_CODES[b]
elif b == '\x00':
break
else:
raise ValueError('Error! Unknown code:', b)
return dna_data
def annotation_string_to_tensor(args, annotation_string):
name_val_pairs = annotation_string.split(';')
name_val_arrays = [p.split('=') for p in name_val_pairs]
annotation_map = {str(p[0]).strip() : p[1] for p in name_val_arrays if len(p) > 1}
annotation_data = np.zeros(( len(defines.ANNOTATIONS[args.annotation_set]),))
for i,a in enumerate(defines.ANNOTATIONS[args.annotation_set]):
if a in annotation_map:
annotation_data[i] = annotation_map[a]
return annotation_data
def get_inserts(args, read_tuples, variant, sort_by='base'):
'''A dictionary mapping insertions to reference positions.
Ignores artificial haplotype read group.
Relies on pysam's cigartuples structure see: http://pysam.readthedocs.io/en/latest/api.html
Match, M -> 0
Insert, I -> 1
Deletion, D -> 2
Ref Skip, N -> 3
Soft Clip, S -> 4
Arguments:
        args.window_size: the width of the reference window being considered
        read_tuples: list of Read namedtuples overlapping the window around the variant
        variant: the variant around which reads will load
    Returns:
        insert_dict: a dict mapping window indices to the maximum insertion length at that point
'''
insert_dict = {}
idx_offset, ref_start, ref_end = get_variant_window(args, variant)
for read in read_tuples:
index_dif = ref_start - read.reference_start
if abs(index_dif) >= args.window_size:
continue
if 'I' in read.cigar:
cur_idx = 0
for t in cigar_string_to_tuples(read.cigar):
if t[0] == defines.CIGAR_CODE['I']:
insert_idx = cur_idx - index_dif
if insert_idx not in insert_dict:
insert_dict[insert_idx] = t[1]
elif insert_dict[insert_idx] < t[1]:
insert_dict[insert_idx] = t[1]
if t[0] in CIGAR_CODES_TO_COUNT:
cur_idx += t[1]
read_tuples.sort(key=lambda read: read.reference_start)
if sort_by == 'base':
read_tuples.sort(key=lambda read: get_base_to_sort_by(read, variant))
return insert_dict
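# Hedged worked example (values are hypothetical): with args.window_size = 10 and a SNP at
# pos 100, get_variant_window gives ref_start = 95. A read with reference_start = 95 and
# cigar '5M2I5M' has consumed 5 bases when the insertion is reached, so the function
# returns insert_dict = {5: 2}: column 5 of the window must make room for a 2-base insert.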
def get_base_to_sort_by(read, variant):
if len(read.seq) > 0:
max_idx = len(read.seq)-1
else:
return 'Z'
if variant.type == 'SNP':
return read.seq[clamp((variant.pos-read.reference_start), 0, max_idx)]
else:
var_idx = (variant.pos-read.reference_start)+1
cur_idx = 0
for cur_op, length in cigar_string_to_tuples(read.cigar):
cur_idx += length
if cur_idx > var_idx:
if cur_op == defines.CIGAR_CODE['M']:
return read.seq[clamp(var_idx, 0, max_idx)]
else:
return defines.CODE2CIGAR[cur_op]
return 'Y'
def cigar_string_to_tuples(cigar):
if not cigar or len(cigar) == 0:
return []
parts = defines.CIGAR_REGEX.findall(cigar)
    # regex captures are (length, op) pairs; emit (op_code, length) tuples, matching pysam's cigartuples
return [(defines.CIGAR2CODE[y], int(x)) for x,y in parts]
def get_variant_window(args, variant):
index_offset = (args.window_size//2)
reference_start = variant.pos-index_offset
reference_end = variant.pos + index_offset + (args.window_size%2)
return index_offset, reference_start, reference_end
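# Illustrative values: with args.window_size = 128 and a variant at pos 1000 this returns
# (64, 936, 1064); with an odd window of 13 it returns (6, 994, 1007), so the window always
# spans exactly window_size reference positions.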
def bool_from_java(val):
return val == 'true'
def clamp(n, minn, maxn):
return max(min(maxn, n), minn)
def read_tuples_to_read_tensor(args, read_tuples, ref_start, insert_dict):
'''Create a read tensor based on a tensor channel map.
Assumes read pairs have the same name.
Only loads reads that might align inside the tensor.
Arguments:
args.read_limit: maximum number of reads to return
read_tuples: list of reads to make arrays from
ref_start: the beginning of the window in reference coordinates
insert_dict: a dict mapping read indices to max insertions at that point.
Returns:
tensor: 3D read tensor.
'''
channel_map = tensor_maps.get_tensor_channel_map_from_args(args)
tensor = np.zeros(tensor_maps.tensor_shape_from_args(args))
if len(read_tuples) > args.read_limit:
read_tuples_idx = np.random.choice(range(len(read_tuples)), size=args.read_limit, replace=False)
read_tuples = [read_tuples[i] for i in read_tuples_idx]
for j,read in enumerate(read_tuples):
rseq, rqual = sequence_and_qualities_from_read(args, read, ref_start, insert_dict)
flag_start = -1
flag_end = 0
for i,b in enumerate(rseq):
if i == args.window_size:
break
if b == defines.SKIP_CHAR:
continue
elif flag_start == -1:
flag_start = i
else:
flag_end = i
if b in args.input_symbols:
if b == defines.INDEL_CHAR:
if K.image_data_format() == 'channels_last':
tensor[j, i, args.input_symbols[b]] = 1.0
else:
tensor[args.input_symbols[b], j, i] = 1.0
else:
hot_array = quality_from_mode(args, rqual[i], b, args.input_symbols)
if K.image_data_format() == 'channels_last':
tensor[j, i, :4] = hot_array
else:
tensor[:4, j, i] = hot_array
elif b in defines.AMBIGUITY_CODES:
if K.image_data_format() == 'channels_last':
tensor[j, i, :4] = defines.AMBIGUITY_CODES[b]
else:
tensor[:4, j, i] = defines.AMBIGUITY_CODES[b]
else:
raise ValueError('Unknown symbol in seq block:', b)
if K.image_data_format() == 'channels_last':
tensor[j, flag_start:flag_end, channel_map['flag_bit_4']] = 1.0 if read.reverse else 0.0
tensor[j, flag_start:flag_end, channel_map['flag_bit_5']] = 1.0 if read.mate_reverse else 0.0
tensor[j, flag_start:flag_end, channel_map['flag_bit_6']] = 1.0 if read.first else 0.0
tensor[j, flag_start:flag_end, channel_map['flag_bit_7']] = 0.0 if read.first else 1.0
else:
tensor[channel_map['flag_bit_4'], j, flag_start:flag_end] = 1.0 if read.reverse else 0.0
tensor[channel_map['flag_bit_5'], j, flag_start:flag_end] = 1.0 if read.mate_reverse else 0.0
tensor[channel_map['flag_bit_6'], j, flag_start:flag_end] = 1.0 if read.first else 0.0
tensor[channel_map['flag_bit_7'], j, flag_start:flag_end] = 0.0 if read.first else 1.0
if 'mapping_quality' in channel_map:
mq = float(read.mapping_quality) / defines.MAPPING_QUALITY_MAX
if K.image_data_format() == 'channels_last':
tensor[j, flag_start:flag_end, channel_map['mapping_quality']] = mq
else:
tensor[channel_map['mapping_quality'], j, flag_start:flag_end] = mq
return tensor
def sequence_and_qualities_from_read(args, read, ref_start, insert_dict):
cur_idx = 0
my_indel_dict = {}
no_qual_filler = 0
index_dif = ref_start - read.reference_start
for t in cigar_string_to_tuples(read.cigar):
my_ref_idx = cur_idx - index_dif
if t[0] == defines.CIGAR_CODE['I'] and my_ref_idx in insert_dict:
my_indel_dict[my_ref_idx] = insert_dict[my_ref_idx] - t[1]
elif t[0] == defines.CIGAR_CODE['D']:
my_indel_dict[my_ref_idx] = t[1]
if t[0] in CIGAR_CODES_TO_COUNT:
cur_idx += t[1]
for k in insert_dict.keys():
if k not in my_indel_dict:
my_indel_dict[k] = insert_dict[k]
rseq = read.seq[:args.window_size]
rqual = read.qual[:args.window_size]
if index_dif > 0:
rseq = rseq[index_dif:]
rqual = rqual[index_dif:]
elif index_dif < 0:
rseq = defines.SKIP_CHAR * (-index_dif) + rseq
rqual = [no_qual_filler]*(-index_dif) + rqual
for j in sorted(my_indel_dict.keys(), key=int, reverse=True):
if j < 1:
rseq = (defines.INDEL_CHAR * my_indel_dict[j]) + rseq
rqual = ([no_qual_filler]*my_indel_dict[j]) + rqual
else:
rseq = rseq[:j] + (defines.INDEL_CHAR * my_indel_dict[j]) + rseq[j:]
rqual = rqual[:j] + ([no_qual_filler]*my_indel_dict[j]) + rqual[j:]
return rseq, rqual
def reference_sequence_into_tensor(args, reference_seq, tensor, insert_dict):
ref_offset = len(set(args.input_symbols.values()))
for i in sorted(insert_dict.keys(), key=int, reverse=True):
if i < 0:
reference_seq = defines.INDEL_CHAR*insert_dict[i] + reference_seq
else:
reference_seq = reference_seq[:i] + defines.INDEL_CHAR*insert_dict[i] + reference_seq[i:]
for i,b in enumerate(reference_seq):
if i == args.window_size:
break
if b in args.input_symbols:
if args.channels_last:
tensor[:, i, ref_offset+args.input_symbols[b]] = 1.0
else:
tensor[ref_offset+args.input_symbols[b], :, i] = 1.0
elif b in defines.AMBIGUITY_CODES:
if args.channels_last:
tensor[:, i, ref_offset:ref_offset+4] = np.tile(defines.AMBIGUITY_CODES[b], (args.read_limit, 1))
else:
tensor[ref_offset:ref_offset+4, :, i] = np.transpose(np.tile(defines.AMBIGUITY_CODES[b], (args.read_limit, 1)))
def base_quality_to_phred_array(base_quality, base, base_dict):
phred = np.zeros((4,))
exponent = float(-base_quality) / 10.0
p = 1.0-(10.0**exponent) # Convert to probability
not_p = (1.0-p) / 3.0 # Error could be any of the other 3 bases
not_base_quality = -10 * np.log10(not_p) # Back to Phred
for b in base_dict.keys():
if b == defines.INDEL_CHAR:
continue
elif b == base:
phred[base_dict[b]] = base_quality
else:
phred[base_dict[b]] = not_base_quality
return phred
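# Editor's worked sketch (illustration only): the Phred <-> probability math
# used above, evaluated for a base quality of 30.
#   p_correct = 1 - 10 ** (-30 / 10.0)    = 0.999
#   not_p     = (1 - p_correct) / 3.0     ~= 0.000333  # spread over the other 3 bases
#   not_base_quality = -10 * log10(not_p) ~= 34.8      # back on the Phred scale
# So the called base keeps its original quality (30) and each other base gets ~34.8.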
def base_quality_to_p_hot_array(base_quality, base, base_dict):
not_p = not_p_lut[base_quality]
phot = [not_p, not_p, not_p, not_p]
phot[base_dict[base]] = p_lut[base_quality]
return phot
def quality_from_mode(args, base_quality, base, base_dict):
if args.base_quality_mode == 'phot':
return base_quality_to_p_hot_array(base_quality, base, base_dict)
elif args.base_quality_mode == 'phred':
return base_quality_to_phred_array(base_quality, base, base_dict)
elif args.base_quality_mode == '1hot':
one_hot = np.zeros((4,))
one_hot[base_dict[base]] = 1.0
return one_hot
else:
raise ValueError('Unknown base quality mode:', args.base_quality_mode)
def predictions_to_snp_scores(predictions, eps=1e-7):
snp = predictions[:, defines.SNP_INDEL_LABELS['SNP']]
not_snp = predictions[:, defines.SNP_INDEL_LABELS['NOT_SNP']]
return np.log(eps + snp / (not_snp + eps))
def predictions_to_indel_scores(predictions, eps=1e-7):
indel = predictions[:, defines.SNP_INDEL_LABELS['INDEL']]
not_indel = predictions[:, defines.SNP_INDEL_LABELS['NOT_INDEL']]
return np.log(eps + indel / (not_indel + eps))
def predictions_to_snp_indel_scores(predictions):
snp_dict = predictions_to_snp_scores(predictions)
indel_dict = predictions_to_indel_scores(predictions)
return snp_dict, indel_dict
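# Editor's sketch (illustration only): for a predictions row where the SNP
# probability is 0.9 and the NOT_SNP probability is 0.05, the SNP score above
# is roughly log(0.9 / 0.05) ~= 2.89, a log-odds-style value that grows as the
# SNP probability dominates. The eps terms only guard against log(0) and
# division by zero.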
def _write_tensor_to_hd5(args, tensor, annotations, contig, pos, variant_type):
tensor_path = os.path.join(args.output_dir, 'inference_tensor_'+contig+pos+variant_type+defines.TENSOR_SUFFIX)
if not os.path.exists(os.path.dirname(tensor_path)):
os.makedirs(os.path.dirname(tensor_path))
with h5py.File(tensor_path, 'w') as hf:
hf.create_dataset(args.tensor_name, data=tensor, compression='gzip')
hf.create_dataset(args.annotation_set, data=annotations, compression='gzip')
|
ksthesis/gatk
|
src/main/python/org/broadinstitute/hellbender/vqsr_cnn/vqsr_cnn/inference.py
|
Python
|
bsd-3-clause
| 17,194
|
[
"pysam"
] |
dca083ec3f34541ec9da9c57470b4c3d268856c99a60997804a47901a778dd28
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
    # TODO: Fix this; this import is attempted even when running in Dev.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIREY, AWS_EXPIREY)
}
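    # Editor's note (illustration): with the AWS_EXPIREY value of 604800
    # seconds (one week) set above, the header expands to
    # 'Cache-Control: max-age=604800, s-maxage=604800, must-revalidate'.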
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('Michael Mitrofanov <noreply@example.com>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[test-for-brian-criswell] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc,
    # memcacheify is painful to install on Windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
    # Your production stuff: Below this line define 3rd party library settings
|
mikemitr/test-for-brian-criswell
|
test-for-brian-criswell/config/production.py
|
Python
|
bsd-3-clause
| 4,374
|
[
"Brian"
] |
97332cadbf2656686c69dd0066ac42d8f46785bd36bb137a20a3d4b8b972c113
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from __future__ import unicode_literals # Not sure whether this is necessary.
#
#title : frases_curtas.py
#description : This script scrapes the web for short phrases
# : for use in social networks
#author : @Py3in
#date start : 20151113
#last update : by github
#version : 0.2 alfa
#usage : python frases_curtas.py --help
#notes : Install python 2.7+ for to use this script.
#python_version : 2.7.6 - (default, Jun 22 2015, 17:58:13)
import re
import random
from time import sleep
try:
from splinter import Browser
except ImportError:
print('please, install splinter\npip install splinter')
try:
from lxml import html
except ImportError:
print('please, install lxml\npip install lxml')
# Global vars temporary... future via argparse
# in future via argparse
url_start = "http://pensador.uol.com.br/frases_curtas/"
# In future via argparse
prefix_file_tmp = "fc_tmp_"
# Global counter on clicks
last_visit = 0
# Attention: maximum number of clicks, used only as a limit during tests.
# Set to None when in production.
max_clicks = 10
# Left-pad the visit counter with zeros when composing temp file names.
# Warning: only four digits are kept, so more than 9999 page clicks will collide.
def zero_left(last_visit):
z = "0000" + str(last_visit)
zl = z[-4:]
return zl
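# Editor's note (illustration): zero_left(7) returns '0007' and zero_left(123)
# returns '0123', so the temporary files sort lexicographically in visit order.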
# Security use: random sleep before a new click (default 3, 15)
r_min = 3 # in future via argparse
r_max = 15 # in future via argparse
def random_click():
_r = random.randint(r_min, r_max)
sleep(_r)
end_sleep = "Return of sleep "
return end_sleep
def save_next_page_clicked(last_visit, page_html):
    file_grv = prefix_file_tmp+zero_left(last_visit)+".html"
    file_tmp = page_html.encode('utf8')
f = open(file_grv, 'wb')
f.write(file_tmp)
f.close()
with Browser() as browser:
browser.visit(url_start)
snpc = save_next_page_clicked(last_visit,browser.html)
    # A while loop will go here.
    # Still working out how to split the steps below into functions.
    last_visit = last_visit + 1
    print last_visit
    print " going to sleep "
next_click = random_click()
print str(next_click)
link_found = browser.find_link_by_partial_text('Próxima')
link_found.first.click()
file_grv = prefix_file_tmp+zero_left(last_visit)+".html"
file_tmp = browser.html.encode('utf8')
f = open(file_grv, 'wb')
f.write(file_tmp)
f.close()
last_visit = last_visit + 1
print last_visit
    print " going to sleep "
next_click = random_click()
print str(next_click)
link_found = browser.find_link_by_partial_text('Próxima')
link_found.first.click()
file_grv = prefix_file_tmp+str(last_visit)+".html"
file_tmp = browser.html.encode('utf8')
f = open(file_grv, 'wb')
f.write(file_tmp)
f.close()
last_visit = last_visit + 1
|
py3in/social_frases
|
social_frases.py
|
Python
|
gpl-2.0
| 2,867
|
[
"VisIt"
] |
2d71613d5ddf0db4070ec13775873b4926de2fb3d44c2aa16265499dbd0723ba
|
"""
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling  # integer number of dictionary atoms
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                    n_components=int(np.floor(n_components / 5)))
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y.reshape(1, -1))
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y.reshape(1, -1))
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
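# Editor's sketch (not part of the original example): a minimal standalone use
# of SparseCoder with the fixed-width dictionary built above, assuming D_fixed
# and y are still in scope.
#
#   coder = SparseCoder(dictionary=D_fixed, transform_algorithm='omp',
#                       transform_n_nonzero_coefs=5)
#   code = coder.transform(y.reshape(1, -1))   # sparse code, shape (1, n_atoms)
#   approx = np.ravel(np.dot(code, D_fixed))   # reconstruction of y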
|
marcocaccin/scikit-learn
|
examples/decomposition/plot_sparse_coding.py
|
Python
|
bsd-3-clause
| 3,876
|
[
"Gaussian"
] |
41678f73ba12dd7e8d4a5a30d8f663b8695ba861f2b983fff4b388790d36ea7b
|
"""
FeedFinder
==============
Tries to find feeds for
a given URL.
This is essentially a rewrite
of feedfinder.py,
originally by Mark Pilgrim
and Aaron Swartz.
Credits from the original:
Abe Fettig for a patch to sort Syndic8 feeds by popularity
Also Jason Diamond, Brian Lalor for bug reporting and patches
Original is located at:
http://www.aaronsw.com/2002/feedfinder/
How it works:
0. At every step, feeds are minimally verified to make sure they are really feeds.
1. If the URI points to a feed, it is simply returned; otherwise
the page is downloaded and the real fun begins.
2. Feeds pointed to by LINK tags in the header of the page (autodiscovery)
3. <A> links to feeds on the same server ending in ".rss", ".rdf", ".xml", or
".atom"
4. <A> links to feeds on the same server containing "rss", "rdf", "xml", or "atom"
5. Try some guesses about common places for feeds (index.xml, atom.xml, etc.).
6. <A> links to feeds on external servers ending in ".rss", ".rdf", ".xml", or
".atom"
7. <A> links to feeds on external servers containing "rss", "rdf", "xml", or "atom"
Copyright:
2002-2004: Mark Pilgrim
2006: Aaron Swartz
2013: Francis Tseng
"""
# Python 2.7 support.
try:
from urllib import request, parse
except ImportError:
import urllib2 as request
import urlparse as parse
from socket import error as SocketError
import errno
import lxml.html
import chardet
def feeds(url):
"""
Tries to find feeds
for a given URL.
"""
url = _full_url(url)
data = _get(url)
# Check if the url is a feed.
if _is_feed(url):
return [url]
# Try to get feed links from markup.
try:
feed_links = [link for link in _get_feed_links(data, url) if _is_feed(link)]
except:
feed_links = []
if feed_links:
return feed_links
# Try 'a' links.
try:
links = _get_a_links(data)
except:
links = []
if links:
# Filter to only local links.
local_links = [link for link in links if link.startswith(url)]
# Try to find feed links.
feed_links.extend(_filter_feed_links(local_links))
# If still nothing has been found...
if not feed_links:
# Try to find feed-looking links.
feed_links.extend(_filter_feedish_links(local_links))
# If still nothing has been found...
if not feed_links:
# BRUTE FORCE IT!
guesses = [
'atom.xml', # Blogger, TypePad
'index.atom', # MoveableType
'index.rdf', # MoveableType
'rss.xml', # Dave Winer/Manila
'index.xml', # MoveableType
'index.rss', # Slash
'feed' # WordPress
]
tries = [parse.urljoin(url, g) for g in guesses]
feed_links.extend([link for link in tries if _is_feed(link)])
# If *still* nothing has been found,
# just try all the links.
if links and not feed_links:
feed_links.extend(_filter_feed_links(links))
feed_links.extend(_filter_feedish_links(links))
# Filter out duplicates.
return list(set(feed_links))
def feed(url):
feed_links = feeds(url)
if feed_links:
return feed_links[0]
else:
return None
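# Editor's sketch (illustration only): typical use of the two public helpers
# above, assuming network access to the target site.
#
#   all_feeds = feeds('example.com/blog')   # every feed URL that could be found
#   first = feed('example.com/blog')        # just the first one, or None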
def _full_url(url):
"""
Assemble the full url
for a url.
"""
url = url.strip()
for x in ['http', 'https']:
if url.startswith('%s://' % x):
return url
return 'http://%s' % url
def _get_feed_links(data, url):
"""
Try to get feed links
defined in the markup.
"""
FEED_TYPES = ('application/rss+xml',
'text/xml',
'application/atom+xml',
'application/x.atom+xml',
'application/x-atom+xml')
links = []
html = lxml.html.fromstring(data)
# For each link...
for link in html.xpath('//link'):
# Try to get the 'rel' attribute.
rel = link.attrib.get('rel', False)
href = link.attrib.get('href', False)
type = link.attrib.get('type', False)
# Check some things.
if not rel or not href or not type: continue
if 'alternate' not in rel.split(): continue
if type not in FEED_TYPES: continue
links.append(parse.urljoin(url, href))
return links
def _get_a_links(data):
"""
Gathers all 'a' links
from the markup.
"""
html = lxml.html.fromstring(data)
return html.xpath('//a/@href')
def _is_feed(url):
"""
Test if a given URL is
a feed.
"""
# If it's not HTTP or HTTPS,
# it's not a feed.
scheme = parse.urlparse(url).scheme
if scheme not in ('http', 'https'):
return 0
data = _get(url)
# If an html tag is present,
# assume it's not a feed.
if data.count('<html'):
return 0
return data.count('<rss') + data.count('<rdf') + data.count('<feed')
def _is_feed_link(url):
"""
Check if a link is
a feed link.
"""
return url[-4:] in ('.rss', '.rdf', '.xml', '.atom')
def _filter_feed_links(links):
"""
Filters a list of links
for only feed links.
"""
candidates = [link for link in links if _is_feed_link(link)]
return [link for link in candidates if _is_feed(link)]
def _filter_feedish_links(links):
"""
Filters a list of links
for links that *look* like
they may be feed links.
"""
feed_links = []
for link in links:
if link.count('rss') + link.count('rdf') + link.count('xml') + link.count('atom'):
if _is_feed(link):
feed_links.append(link)
return feed_links
def _get(url):
"""
Tries to access the url
and return its data.
"""
req = request.Request(url)
try:
resp = request.urlopen(req)
body = resp.read()
# Use Chardet to determine the encoding.
encoding = chardet.detect(body)['encoding']
return body.decode(encoding)
except request.HTTPError as e:
print('HTTP Error:', e.code, url)
return ''
except request.URLError as e:
print('URL Error:', e.reason, url)
return ''
#this doesn't exist in 2.7:
# except ConnectionResetError as e:
# print('Connection Error:', e.reason, url)
# return ''
except SocketError as e:
if e.errno != errno.ECONNRESET:
raise # Not error we are looking for
print('Connection Error:', str(e), url) # Handle error here.
return ''
|
jakemadison/FeedEater
|
feedeater/controller/feedfinder_new.py
|
Python
|
agpl-3.0
| 6,640
|
[
"Brian"
] |
a589c0fed2ddda77c98ddab88b747dede34b4019f69432441899df45f8c340c2
|
# encoding: utf-8
# Wellcome Trust Sanger Institute and Imperial College London
# Copyright (C) 2020 Wellcome Trust Sanger Institute and Imperial College London
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Generic imports
import sys
import argparse
import re
# Phylogenetic imports
import dendropy
# Biopython imports
from Bio import AlignIO
from Bio import Phylo
from Bio import SeqIO
from Bio.Align import MultipleSeqAlignment
from Bio.Seq import Seq
# command line parsing
def get_options():
parser = argparse.ArgumentParser(description='Extract a clade from a Gubbins output',
prog='extract_clade')
# input options
parser.add_argument('--list',
help = 'List of sequences to extract',
required = True)
parser.add_argument('--aln',
help = 'Input alignment (FASTA format)',
required = True)
parser.add_argument('--gff',
help = 'GFF of recombinant regions detected by Gubbins',
required = True)
parser.add_argument('--tree',
help = 'Final tree generated by Gubbins',
required = True)
parser.add_argument('--out',
help = 'Output file prefix',
required = True)
parser.add_argument('--out-fmt',
help = 'Format of output alignment',
default = 'fasta')
parser.add_argument('--missing-char',
help = 'Character used to replace recombinant sequence',
default = '-')
return parser.parse_args()
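# Editor's sketch (illustration only; the file names below are hypothetical):
# a typical command line for this script.
#
#   extract_clade.py --list clade_taxa.txt --aln gubbins.filtered.aln \
#       --gff gubbins.recombination_predictions.gff \
#       --tree gubbins.final_tree.tre --out clade_subset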
# main code
if __name__ == "__main__":
# Get command line options
args = get_options()
# Parse list of input sequences
subset = set()
# Read in FASTA assemblies
with open(args.list,'r') as seq_list:
for line in seq_list.readlines():
subset.add(line.strip().split()[0])
# Extract from alignment
output_aln_name = args.out + '.aln'
names_in_alignment = set()
with open(output_aln_name,'w') as out_aln:
alignment = AlignIO.read(args.aln,'fasta')
for taxon in alignment:
names_in_alignment.add(taxon.id)
if taxon.id in subset:
SeqIO.write(taxon, out_aln, args.out_fmt)
# Check subset sequences are found in alignment
not_found_in_dataset = subset - names_in_alignment
if len(not_found_in_dataset) > 0:
sys.stderr.write('Sequences in subset missing from alignment: ' + \
str(not_found_in_dataset) + '\n')
sys.exit(1)
# Prune from the tree
output_tree_name = args.out + '.tree'
tree = dendropy.Tree.get(path = args.tree,
schema = 'newick',
preserve_underscores = True)
tree.retain_taxa_with_labels(subset)
tree.write_to_path(output_tree_name,
'newick')
# Identify relevant recombination blocks
output_gff_name = args.out + '.gff'
taxon_pattern = re.compile('taxa="([^"]*)"')
with open(args.gff,'r') as in_gff, open(output_gff_name,'w') as out_gff:
for line in in_gff.readlines():
if line.startswith('##'):
out_gff.write(line)
else:
info = line.rstrip().split('\t')
taxon_set = set(taxon_pattern.search(info[8]).group(1).split())
if not taxon_set.isdisjoint(subset):
out_gff.write(line)
|
sanger-pathogens/gubbins
|
python/scripts/extract_clade.py
|
Python
|
gpl-2.0
| 4,276
|
[
"Biopython"
] |
de344b81cbedef3896d28c8d3a66de29021bdc0caa78de05cc648673ce109173
|
# 4455770 Dennis Verheijden KI
# 4474139 Remco van der Heijden KI
# multiAgents.py
# --------------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
from util import manhattanDistance, nearestPoint
from game import Directions, Agent, Actions
import random, util
import distanceCalculator
class CompetitionAgent(Agent):
"""
A base class for competition agents. The convenience methods herein handle
some of the complications of the game.
Recommended Usage: Subclass CompetitionAgent and override getAction.
"""
#############################
# Methods to store key info #
#############################
def __init__(self, index=0):
"""
Lists several variables you can query:
self.index = index for this agent
self.distancer = distance calculator (contest code provides this)
self.timeForComputing = an amount of time to give each turn for computing maze distances
(part of the provided distance calculator)
"""
# Agent index for querying state, N.B. pacman is always agent 0
self.index = index
# Maze distance calculator
self.distancer = None
# Time to spend each turn on computing maze distances
# Access to the graphics
self.display = None
# useful function to find functions you've defined elsewhere..
# self.usefulFunction = util.lookup(usefulFn, globals())
# self.evaluationFunction = util.lookup(evalFn, globals())
def registerInitialState(self, gameState):
"""
This method handles the initial setup of the
agent to populate useful fields.
A distanceCalculator instance caches the maze distances
between each pair of positions, so your agents can use:
self.distancer.getDistance(p1, p2)
"""
self.distancer = distanceCalculator.Distancer(gameState.data.layout)
# comment this out to forgo maze distance computation and use manhattan distances
self.distancer.getMazeDistances()
# Static world properties
self.wallList = gameState.getWalls()
self.wallHeight = self.wallList.height
self.wallWidth = self.wallList.width
# Determine in which world you are
        if self.wallHeight == 9 and self.wallWidth == 25:
            self.world = 'level0'
        elif self.wallHeight == 7 and self.wallWidth == 20:
            self.world = 'level1'
        elif self.wallHeight == 13 and self.wallWidth == 20:
            self.world = 'level2'
        elif self.wallHeight == 27 and self.wallWidth == 28:
            self.world = 'level3'
        else:
            self.world = 'unknown'
# Set the depth at which you want to search
        if self.world == 'level0':
            self.depth = 2
            self.timeForComputing = .2
        elif self.world == 'level1':
            self.depth = 3
            self.timeForComputing = .2
        elif self.world == 'level2':
            self.depth = 2
            self.timeForComputing = .3
            self.capsuleImpulse = True
        elif self.world == 'level3':
            self.depth = 3
            self.timeForComputing = .25
        elif self.world == 'unknown':
            self.depth = 2
            self.timeForComputing = .2
# Prepare for the pacman ExploredList
self.exploredListGrid = [[0 for x in range(100)] for x in range(100)]
self.exploredList = []
# Prepare for the ghost properties
# ghostIndex, DistanceToGhost, ScaredTime = ghost
self.ghosts = [(0, float('Inf'), 0), (1, float('Inf'), 0), (2, float('Inf'), 0), (3, float('Inf'), 0)]
# If the response is triggered to get a capsule, than go get it
self.capsuleImpulse = False
import __main__
if '_display' in dir(__main__):
self.display = __main__._display
#################
# Action Choice #
#################
def getAction(self, gameState):
"""
Override this method to make a good agent. It should return a legal action within
the time limit (otherwise a random legal action will be chosen for you).
"""
util.raiseNotDefined()
#######################
# Convenience Methods #
#######################
def getFood(self, gameState):
"""
Returns the food you're meant to eat. This is in the form of a matrix
where m[x][y]=true if there is food you can eat (based on your team) in that square.
"""
return gameState.getFood()
def getCapsules(self, gameState):
return gameState.getCapsules()
def getScore(self, gameState):
"""
Returns how much you are beating the other team by in the form of a number
that is the difference between your score and the opponents score. This number
is negative if you're losing.
"""
return gameState.getScore()
def getMazeDistance(self, pos1, pos2):
"""
Returns the distance between two points; These are calculated using the provided
distancer object.
If distancer.getMazeDistances() has been called, then maze distances are available.
Otherwise, this just returns Manhattan distance.
"""
d = self.distancer.getDistance(pos1, pos2)
return d
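# Editor's sketch (illustration only): the minimal shape of an agent following
# the "subclass CompetitionAgent and override getAction" recommendation from
# the class docstring. It just picks a random legal move.
#
# class RandomCompetitionAgent(CompetitionAgent):
#     def getAction(self, gameState):
#         return random.choice(gameState.getLegalActions(self.index))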
class MyPacmanAgent(CompetitionAgent):
"""
This is going to be your brilliant competition agent.
    You might want to copy code from BaselineAgent (above) and/or any previous assignment.
"""
def getAction(self, gameState):
"""
getAction chooses among the best options according to the evaluation function.
Just like in the previous projects, getAction takes a GameState and returns
some Directions.X for some X in the set {North, South, West, East, Stop}.
"""
# Add current position to your exploredList (only look at the last 20 positions)
x, y = gameState.getPacmanPosition()
self.exploredListGrid[x][y] += 1
self.exploredList.append((x, y))
if len(self.exploredList) > 20:
x, y = self.exploredList.pop(0)
self.exploredListGrid[x][y] += -1
# Update the previous food and capsule state
self.foodGrid = gameState.getFood()
self.capsules = gameState.getCapsules()
self.oldScore = gameState.getScore()
self.nrOfFoods = len(self.foodGrid.asList())
# Helper Functions
def maxValue(state, currentDepth, alpha, beta):
"""
Calculates the maximum score possible for the pacman Agent
"""
currentDepth = currentDepth + 1
if state.isWin() or state.isLose() or currentDepth == self.depth:
return self.evaluationFunction(state)
maxScore = float('-Inf')
for pacmanAction in state.getLegalActions(0):
maxScore = max(maxScore, minValue(state.generateSuccessor(0, pacmanAction), currentDepth, 1, alpha, beta))
alpha = max(alpha, maxScore)
if beta <= alpha:
break # prune
return maxScore
def minValue(state, currentDepth, ghostIndex, alpha, beta):
"""
Calculates the minimum score possible for the ghost Agent(s)
"""
if state.isWin() or state.isLose():
return self.evaluationFunction(state)
minScore = float('Inf')
for ghostAction in state.getLegalActions(ghostIndex):
if ghostIndex == gameState.getNumAgents() - 1:
minScore = min(minScore, maxValue(state.generateSuccessor(ghostIndex, ghostAction), currentDepth, alpha, beta))
else:
minScore = min(minScore, minValue(state.generateSuccessor(ghostIndex, ghostAction), currentDepth, ghostIndex + 1, alpha, beta))
beta = min(beta, minScore)
if beta <= alpha:
break # prune
return minScore
# Begin AlphaBeta
pacmanActions = gameState.getLegalActions(0)
pacmanActions.remove("Stop")
maximum = float('-Inf')
alpha = float('-Inf')
beta = float('Inf')
maxAction = ''
for pacmanAction in pacmanActions:
currentDepth = 0
currentMax = minValue(gameState.generateSuccessor(0, pacmanAction), currentDepth, 1, alpha, beta)
if currentMax > maximum:
maximum = currentMax
maxAction = pacmanAction
if maxAction == '':
if self.lastAction in pacmanActions:
return self.lastAction
else:
import random
return random.choice(pacmanActions)
self.lastAction = maxAction
return maxAction
def evaluationFunction(self, state):
"""
Masterful Evaluation Function
"""
# Utilise a counter for the heuristic
heuristic = util.Counter()
# World Properties
oldFoodGrid = self.foodGrid
foodGrid = state.getFood()
nrOfFoods = len(foodGrid.asList())
capsules = self.capsules
# Pacman Properties
pacmanPosition = state.getPacmanPosition()
xPacman, yPacman = pacmanPosition
pacmanActions = set(Actions.getLegalNeighbors(pacmanPosition, self.wallList))
# Ghost Properties
ghostPositions = state.getGhostPositions()
ghostStates = state.getGhostStates()
nrGhosts = state.getNumAgents() - 1
ghostActions = []
totalGhostDistance = 0
minGhostDistance = float('Inf')
minScaredGhostDistance = float('Inf')
maxScaredTimer = float('-Inf')
for ghost in range(nrGhosts):
ghostIndex, ghostDistance, scaredTime= self.ghosts[ghost]
ghostDistance = self.getMazeDistance(pacmanPosition, ghostPositions[ghost])
totalGhostDistance += ghostDistance
scaredTime = ghostStates[ghost].scaredTimer
ghostActions += Actions.getLegalNeighbors(ghostPositions[ghost], self.wallList)
if ghostDistance < minScaredGhostDistance and scaredTime > 0:
minScaredGhostDistance = ghostDistance
if ghostDistance < minGhostDistance:
minGhostDistance = ghostDistance
if scaredTime > maxScaredTimer:
maxScaredTimer = scaredTime
self.ghosts[ghost] = (ghostIndex, ghostDistance, scaredTime)
# Help Functions
def minFoodDist(foodGrid, position):
"""
Returns the minimum food distance
It first searches for foods that are close by to save computation time.
"""
x, y = position
distances = []
if (x < 7):
x = 4
if (x >= self.wallWidth - 2):
x += -4
if (y < 7):
y = 4
if (y >= self.wallHeight - 2):
y += -4
for xFood in range(x-3,x+3,1):
for yFood in range (y-3,y+3,1):
food = foodGrid[xFood][yFood]
if food:
distances.append(self.getMazeDistance((xFood, yFood), position))
if len(distances) == 0:
distances = [self.getMazeDistance(food, position) for food in foodGrid.asList()]
if len(distances) > 0:
minDistance = min(distances)
return minDistance
else:
return 0
# Check for trapped situations (there are no good options for pacman)
goodActions = pacmanActions - set(ghostActions)
if not goodActions:
heuristic['trapped'] = -2000
# Lose case
if state.isLose():
return float('-Inf')
# Prefer not to visit already visited places (avoiding loops)
if self.exploredListGrid[xPacman][yPacman] > 2 and not(maxScaredTimer > 0):
heuristic['beenThere'] = -100 * self.exploredListGrid[xPacman][yPacman]
foodDifference = self.nrOfFoods - nrOfFoods
if foodDifference == 1:
heuristic['OneFoodLess'] = 1000
# Minimum distance to the food
if not(maxScaredTimer > 0):
if not oldFoodGrid[xPacman][yPacman]:
heuristic['minFoodDistance'] = -minFoodDist(foodGrid, pacmanPosition)/(self.wallWidth * self.wallHeight)
# Eating ghosts
if maxScaredTimer > 1:
# if maxScaredTimer < 2 * minScaredGhostDistance and maxScaredTimer > 0:
heuristic['nearScaredGhost'] = 100 / minScaredGhostDistance
# Prioritise ghost eating when ghosts are scared, not food
if maxScaredTimer > 0:
if oldFoodGrid[xPacman][yPacman]:
heuristic['eatFood'] = -10
# Capsule Reasoning
capsuleDistance = [self.getMazeDistance(capsule, pacmanPosition) for capsule in capsules]
if capsuleDistance and minGhostDistance < 10 and min(capsuleDistance) < 10:
self.capsuleImpulse = True
# Eat the powerpelets before finishing the level
if capsuleDistance and self.nrOfFoods == 1 and oldFoodGrid[xPacman][yPacman]:
heuristic['PowerpeletFirst'] = -1000
self.capsuleImpulse = True
# If Ghosts not scared, than don't give higher heuristic for capsule eating
if self.capsuleImpulse and not(maxScaredTimer > 0):
if capsuleDistance:
heuristic['nearCapsule'] = 10 / min(capsuleDistance)
if pacmanPosition in capsules:
heuristic['eatCapsule'] = 300
self.capsuleImpulse = False
# World specific heuristics
if self.world == 'level0' or self.world == 'level1':
if self.nrOfFoods == 1 and maxScaredTimer > 0 and oldFoodGrid[xPacman][yPacman]:
heuristic['GhostsFirst'] = -10000
heuristic['score'] = state.getScore()
return heuristic.totalCount()
MyPacmanAgent = MyPacmanAgent
|
ScaleRunner/PacmanAI
|
6 - Contest/competitionAgents.py
|
Python
|
mit
| 13,362
|
[
"VisIt"
] |
9258c7a7e98655ac5727278e550a36ada615bae813027f508c9b8a14580377a5
|
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2007 Johan Gonqvist <johan.gronqvist@gmail.com>
# Copyright (C) 2007-2009 Gary Burton <gary.burton@zen.co.uk>
# Copyright (C) 2007-2009 Stephane Charette <stephanecharette@gmail.com>
# Copyright (C) 2008-2009 Brian G. Matherly
# Copyright (C) 2008 Jason M. Simanek <jason@bohemianalps.com>
# Copyright (C) 2008-2011 Rob G. Healey <robhealey1@gmail.com>
# Copyright (C) 2010 Doug Blank <doug.blank@gmail.com>
# Copyright (C) 2010 Jakim Friant
# Copyright (C) 2010- Serge Noiraud
# Copyright (C) 2011 Tim G L Lyons
# Copyright (C) 2013 Benny Malengier
# Copyright (C) 2016 Allen Crider
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Narrative Web Page generator.
Classes:
    SourcePages - Source index page and individual Source pages
"""
#------------------------------------------------
# python modules
#------------------------------------------------
from collections import defaultdict
from decimal import getcontext
import logging
#------------------------------------------------
# Gramps module
#------------------------------------------------
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.lib import Source
from gramps.plugins.lib.libhtml import Html
#------------------------------------------------
# specific narrative web import
#------------------------------------------------
from gramps.plugins.webreport.basepage import BasePage
from gramps.plugins.webreport.common import (FULLCLEAR, html_escape)
_ = glocale.translation.sgettext
LOG = logging.getLogger(".NarrativeWeb.source")
getcontext().prec = 8
#################################################
#
# creates the Source List Page and Source Pages
#
#################################################
class SourcePages(BasePage):
"""
This class is responsible for displaying information about the 'Source'
database objects. It displays this information under the 'Sources'
tab. It is told by the 'add_instances' call which 'Source's to display,
    and remembers the list of sources. A single call to 'display_pages'
    displays both the Source List (Index) page and all the individual
    Source pages.
The base class 'BasePage' is initialised once for each page that is
displayed.
"""
def __init__(self, report, the_lang, the_title):
"""
@param: report -- The instance of the main report class
for this report
@param: the_lang -- The lang to process
@param: the_title -- The title page related to the language
"""
BasePage.__init__(self, report, the_lang, the_title)
self.source_dict = defaultdict(set)
self.navigation = None
self.citationreferents = None
def display_pages(self, the_lang, the_title):
"""
Generate and output the pages under the Sources tab, namely the sources
index and the individual sources pages.
@param: the_lang -- The lang to process
@param: the_title -- The title page related to the language
"""
LOG.debug("obj_dict[Source]")
for item in self.report.obj_dict[Source].items():
LOG.debug(" %s", str(item))
message = _("Creating source pages")
progress_title = self.report.pgrs_title(the_lang)
with self.r_user.progress(progress_title, message,
len(self.report.obj_dict[Source]) + 1
) as step:
self.sourcelistpage(self.report, the_lang, the_title,
self.report.obj_dict[Source].keys())
index = 1
for source_handle in self.report.obj_dict[Source]:
step()
index += 1
self.sourcepage(self.report, the_lang, the_title, source_handle)
def sourcelistpage(self, report, the_lang, the_title, source_handles):
"""
Generate and output the Sources index page.
@param: report -- The instance of the main report class for
this report
@param: the_lang -- The lang to process
@param: the_title -- The title page related to the language
@param: source_handles -- A list of the handles of the sources to be
displayed
"""
BasePage.__init__(self, report, the_lang, the_title)
source_dict = {}
output_file, sio = self.report.create_file("sources")
result = self.write_header(self._("Sources"))
sourcelistpage, dummy_head, dummy_body, outerwrapper = result
# begin source list division
with Html("div", class_="content", id="Sources") as sourceslist:
outerwrapper += sourceslist
# Sort the sources
for handle in source_handles:
source = self.r_db.get_source_from_handle(handle)
if source is not None:
key = source.get_title() + source.get_author()
key += str(source.get_gramps_id())
source_dict[key] = (source, handle)
keys = sorted(source_dict, key=self.rlocale.sort_key)
msg = self._("This page contains an index of all the sources "
"in the database, sorted by their title. "
"Clicking on a source’s "
"title will take you to that source’s page.")
sourceslist += Html("p", msg, id="description")
# begin sourcelist table and table head
with Html("table",
class_="infolist primobjlist sourcelist") as table:
sourceslist += table
thead = Html("thead")
table += thead
trow = Html("tr")
thead += trow
header_row = [
(self._("Number"), "ColumnRowLabel"),
(self._("Author"), "ColumnAuthor"),
(self._("Name", "Source Name"), "ColumnName")]
trow.extend(
Html("th", label or " ", class_=colclass, inline=True)
for (label, colclass) in header_row
)
# begin table body
tbody = Html("tbody")
table += tbody
for index, key in enumerate(keys):
source, source_handle = source_dict[key]
trow = Html("tr") + (
Html("td", index + 1, class_="ColumnRowLabel",
inline=True)
)
tbody += trow
trow.extend(
Html("td", source.get_author(), class_="ColumnAuthor",
inline=True)
)
trow.extend(
Html("td", self.source_link(source_handle,
source.get_title(),
source.get_gramps_id()),
class_="ColumnName")
)
# add clearline for proper styling
# add footer section
footer = self.write_footer(None)
outerwrapper += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(sourcelistpage, output_file, sio, 0)
def sourcepage(self, report, the_lang, the_title, source_handle):
"""
Generate and output an individual Source page.
@param: report -- The instance of the main report class
for this report
@param: the_lang -- The lang to process
@param: the_title -- The title page related to the language
@param: source_handle -- The handle of the source to be output
"""
source = report.database.get_source_from_handle(source_handle)
BasePage.__init__(self, report, the_lang, the_title,
source.get_gramps_id())
if not source:
return
self.page_title = source.get_title()
inc_repositories = self.report.options["inc_repository"]
self.navigation = self.report.options['navigation']
self.citationreferents = self.report.options['citationreferents']
output_file, sio = self.report.create_file(source_handle, "src")
self.uplink = True
result = self.write_header("%s - %s" % (self._('Sources'),
self.page_title))
sourcepage, dummy_head, dummy_body, outerwrapper = result
ldatec = 0
# begin source detail division
with Html("div", class_="content", id="SourceDetail") as sourcedetail:
outerwrapper += sourcedetail
media_list = source.get_media_list()
if self.create_media and media_list:
thumbnail = self.disp_first_img_as_thumbnail(media_list,
source)
if thumbnail is not None:
sourcedetail += thumbnail
# add section title
sourcedetail += Html("h3", html_escape(source.get_title()),
inline=True)
# begin sources table
with Html("table", class_="infolist source") as table:
sourcedetail += table
tbody = Html("tbody")
table += tbody
source_gid = False
if not self.noid and self.gid:
source_gid = source.get_gramps_id()
# last modification of this source
ldatec = source.get_change_time()
for (label, value) in [(self._("Gramps ID"), source_gid),
(self._("Author"), source.get_author()),
(self._("Abbreviation"),
source.get_abbreviation()),
(self._("Publication information"),
source.get_publication_info())]:
if value:
trow = Html("tr") + (
Html("td", label, class_="ColumnAttribute",
inline=True),
Html("td", value, class_="ColumnValue", inline=True)
)
tbody += trow
# Tags
tags = self.show_tags(source)
if tags and self.report.inc_tags:
trow = Html("tr") + (
Html("td", self._("Tags"),
class_="ColumnAttribute", inline=True),
Html("td", tags,
class_="ColumnValue", inline=True)
)
tbody += trow
# Source notes
notelist = self.display_note_list(source.get_note_list(), Source)
if notelist is not None:
sourcedetail += notelist
# additional media from Source (if any?)
if self.create_media and media_list:
sourcemedia = self.disp_add_img_as_gallery(media_list, source)
if sourcemedia is not None:
sourcedetail += sourcemedia
# Source Data Map...
src_data_map = self.write_srcattr(source.get_attribute_list())
if src_data_map is not None:
sourcedetail += src_data_map
# Source Repository list
if inc_repositories:
repo_list = self.dump_repository_ref_list(
source.get_reporef_list())
if repo_list is not None:
sourcedetail += repo_list
# Source references list
ref_list = self.display_bkref_list(Source, source_handle)
if ref_list is not None:
sourcedetail += ref_list
# add clearline for proper styling
# add footer section
footer = self.write_footer(ldatec)
outerwrapper += (FULLCLEAR, footer)
# send page out for processing
# and close the file
self.xhtml_writer(sourcepage, output_file, sio, ldatec)
|
Fedik/gramps
|
gramps/plugins/webreport/source.py
|
Python
|
gpl-2.0
| 13,250
|
[
"Brian"
] |
f5f0772e2ee02bb3fa03e9b46fd92f60d5dce536ce0b30a2b9ee114c365ea681
|
def read_aims(filename):
"""Import FHI-aims geometry type files.
Reads unitcell, atom positions and constraints from
a geometry.in file.
"""
from ase import Atoms
from ase.constraints import FixAtoms, FixCartesian
import numpy as np
atoms = Atoms()
fd = open(filename, 'r')
lines = fd.readlines()
fd.close()
positions = []
cell = []
symbols = []
magmoms = []
fix = []
fix_cart = []
xyz = np.array([0, 0, 0])
i = -1
n_periodic = -1
periodic = np.array([False, False, False])
for n, line in enumerate(lines):
inp = line.split()
if inp == []:
continue
if inp[0] == 'atom':
if xyz.all():
fix.append(i)
elif xyz.any():
fix_cart.append(FixCartesian(i, xyz))
floatvect = float(inp[1]), float(inp[2]), float(inp[3])
positions.append(floatvect)
symbols.append(inp[-1])
i += 1
xyz = np.array([0, 0, 0])
elif inp[0] == 'lattice_vector':
floatvect = float(inp[1]), float(inp[2]), float(inp[3])
cell.append(floatvect)
n_periodic = n_periodic + 1
periodic[n_periodic] = True
elif inp[0] == 'initial_moment':
magmoms.append(float(inp[1]))
if inp[0] == 'constrain_relaxation':
if inp[1] == '.true.':
fix.append(i)
elif inp[1] == 'x':
xyz[0] = 1
elif inp[1] == 'y':
xyz[1] = 1
elif inp[1] == 'z':
xyz[2] = 1
if xyz.all():
fix.append(i)
elif xyz.any():
fix_cart.append(FixCartesian(i, xyz))
atoms = Atoms(symbols, positions)
if len(magmoms) > 0:
atoms.set_initial_magnetic_moments(magmoms)
if periodic.any():
atoms.set_cell(cell)
atoms.set_pbc(periodic)
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
return atoms
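# Editor's sketch (illustration only): a minimal geometry.in that read_aims
# above can parse; one 'lattice_vector' line per cell axis, one 'atom' line per
# atom, and optional 'constrain_relaxation' lines following an atom.
#
#   lattice_vector 2.71 2.71 0.00
#   lattice_vector 2.71 0.00 2.71
#   lattice_vector 0.00 2.71 2.71
#   atom 0.0000 0.0000 0.0000 Si
#   constrain_relaxation .true.
#   atom 1.3575 1.3575 1.3575 Si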
def write_aims(filename, atoms):
"""Method to write FHI-aims geometry files.
    Writes the atom positions and constraints (FixAtoms and
    FixCartesian are supported).
"""
from ase.constraints import FixAtoms, FixCartesian
import numpy as np
if isinstance(atoms, (list, tuple)):
if len(atoms) > 1:
raise RuntimeError("Don't know how to save more than "+
"one image to FHI-aims input")
else:
atoms = atoms[0]
fd = open(filename, 'w')
fd.write('#=======================================================\n')
fd.write('#FHI-aims file: '+filename+'\n')
fd.write('#Created using the Atomic Simulation Environment (ASE)\n')
fd.write('#=======================================================\n')
i = 0
if atoms.get_pbc().any():
for n, vector in enumerate(atoms.get_cell()):
fd.write('lattice_vector ')
for i in range(3):
fd.write('%16.16f ' % vector[i])
fd.write('\n')
fix_cart = np.zeros([len(atoms),3])
if atoms.constraints:
for constr in atoms.constraints:
if isinstance(constr, FixAtoms):
fix_cart[constr.index] = [1,1,1]
elif isinstance(constr, FixCartesian):
fix_cart[constr.a] = -constr.mask+1
for i, atom in enumerate(atoms):
fd.write('atom ')
for pos in atom.position:
fd.write('%16.16f ' % pos)
fd.write(atom.symbol)
fd.write('\n')
# (1) all coords are constrained:
if fix_cart[i].all():
fd.write('constrain_relaxation .true.\n')
# (2) some coords are constrained:
elif fix_cart[i].any():
xyz = fix_cart[i]
for n in range(3):
if xyz[n]:
fd.write('constrain_relaxation %s\n' % 'xyz'[n])
if atom.charge:
fd.write('initial_charge %16.6f\n' % atom.charge)
if atom.magmom:
fd.write('initial_moment %16.6f\n' % atom.magmom)
# except KeyError:
# continue
def read_energy(filename):
for line in open(filename, 'r'):
if line.startswith(' | Total energy corrected'):
E = float(line.split()[-2])
return E
def read_aims_output(filename, index = -1):
""" Import FHI-aims output files with all data available, i.e. relaxations,
    MD information, force information, etc. """
from ase import Atoms, Atom
from ase.calculators.singlepoint import SinglePointCalculator
from ase.units import Ang, fs
from ase.constraints import FixAtoms, FixCartesian
molecular_dynamics = False
fd = open(filename, 'r')
cell = []
images = []
fix = []
fix_cart = []
n_periodic = -1
f = None
pbc = False
found_aims_calculator = False
v_unit = Ang/(1000.0*fs)
while True:
line = fd.readline()
if not line:
break
if "List of parameters used to initialize the calculator:" in line:
fd.readline()
calc = read_aims_calculator(fd)
calc.out = filename
found_aims_calculator = True
if "Number of atoms" in line:
inp = line.split()
n_atoms = int(inp[5])
if "| Unit cell:" in line:
if not pbc:
pbc = True
for i in range(3):
inp = fd.readline().split()
cell.append([inp[1],inp[2],inp[3]])
if "Found relaxation constraint for atom" in line:
xyz = [0, 0, 0]
ind = int(line.split()[5][:-1])-1
if "All coordinates fixed" in line:
if ind not in fix:
fix.append(ind)
if "coordinate fixed" in line:
coord = line.split()[6]
constr_ind = 0
if coord == 'x':
xyz[0] = 1
elif coord == 'y':
xyz[1] = 1
elif coord == 'z':
xyz[2] = 1
keep = True
for n,c in enumerate(fix_cart):
if ind == c.a:
keep = False
constr_ind = n
if keep:
fix_cart.append(FixCartesian(ind, xyz))
else:
                    fix_cart[constr_ind].mask[xyz.index(1)] = 0
if "Atomic structure:" in line and not molecular_dynamics:
fd.readline()
atoms = Atoms()
for i in range(n_atoms):
inp = fd.readline().split()
atoms.append(Atom(inp[3],(inp[4],inp[5],inp[6])))
if "Complete information for previous time-step:" in line:
molecular_dynamics = True
if "Updated atomic structure:" in line and not molecular_dynamics:
fd.readline()
atoms = Atoms()
velocities = []
for i in range(n_atoms):
inp = fd.readline().split()
if 'lattice_vector' in inp[0]:
cell = []
for i in range(3):
cell += [[float(inp[1]),float(inp[2]),float(inp[3])]]
inp = fd.readline().split()
atoms.set_cell(cell)
inp = fd.readline().split()
atoms.append(Atom(inp[4],(inp[1],inp[2],inp[3])))
if molecular_dynamics:
inp = fd.readline().split()
if "Atomic structure (and velocities)" in line:
fd.readline()
atoms = Atoms()
velocities = []
for i in range(n_atoms):
inp = fd.readline().split()
atoms.append(Atom(inp[4],(inp[1],inp[2],inp[3])))
inp = fd.readline().split()
velocities += [[float(inp[1])*v_unit,float(inp[2])*v_unit,float(inp[3])*v_unit]]
atoms.set_velocities(velocities)
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
images.append(atoms)
if "Total atomic forces" in line:
f = []
for i in range(n_atoms):
inp = fd.readline().split()
f.append([float(inp[2]),float(inp[3]),float(inp[4])])
if not found_aims_calculator:
e = images[-1].get_potential_energy()
images[-1].set_calculator(SinglePointCalculator(e,f,None,None,atoms))
e = None
f = None
if "Total energy corrected" in line:
e = float(line.split()[5])
if pbc:
atoms.set_cell(cell)
atoms.pbc = True
if not found_aims_calculator:
atoms.set_calculator(SinglePointCalculator(e,None,None,None,atoms))
if not molecular_dynamics:
if len(fix):
atoms.set_constraint([FixAtoms(indices=fix)]+fix_cart)
else:
atoms.set_constraint(fix_cart)
images.append(atoms)
e = None
if found_aims_calculator:
calc.set_results(images[-1])
images[-1].set_calculator(calc)
fd.close()
if molecular_dynamics:
images = images[1:]
# return requested images, code borrowed from ase/io/trajectory.py
if isinstance(index, int):
return images[index]
else:
step = index.step or 1
if step > 0:
start = index.start or 0
if start < 0:
start += len(images)
stop = index.stop or len(images)
if stop < 0:
stop += len(images)
else:
if index.start is None:
start = len(images) - 1
else:
start = index.start
if start < 0:
start += len(images)
if index.stop is None:
stop = -1
else:
stop = index.stop
if stop < 0:
stop += len(images)
return [images[i] for i in range(start, stop, step)]
|
conwayje/ase-python
|
ase/io/aims.py
|
Python
|
gpl-2.0
| 10,341
|
[
"ASE",
"FHI-aims"
] |
9c45d5c200057153bd83b9d6072c20d09aec8325f4931d2c2ee6f2be9148dee3
|
# -*- coding: utf-8 -*-
# flake8: noqa: E741
import gzip
import re
import subprocess
import time
import os
import shutil
from datetime import datetime, timedelta, timezone
from io import BytesIO, StringIO
from pathlib import Path
from unittest import mock
from flaky import flaky
from flask import session, escape, url_for, g, request
from flask_babel import gettext
from mock import patch, ANY
import pytest
from passphrases import PassphraseGenerator
from source_app.session_manager import SessionManager
from . import utils
import version
from db import db
from journalist_app.utils import delete_collection
from models import InstanceConfig, Source, Reply
from source_app import api as source_app_api, session_manager
from source_app import get_logo_url
from .utils.db_helper import new_codename, submit
from .utils.i18n import get_test_locales, language_tag, page_language, xfail_untranslated_messages
from .utils.instrument import InstrumentedApp
GENERATE_DATA = {'tor2web_check': 'href="fake.onion"'}
def test_logo_default_available(config, source_app):
# if the custom image is available, this test will fail
custom_image_location = os.path.join(
config.SECUREDROP_ROOT, "static/i/custom_logo.png"
)
if os.path.exists(custom_image_location):
os.remove(custom_image_location)
with source_app.test_client() as app:
logo_url = get_logo_url(source_app)
assert logo_url.endswith('i/logo.png')
response = app.get(logo_url, follow_redirects=False)
assert response.status_code == 200
def test_logo_custom_available(config, source_app):
    # if the custom image is not available, copy the default one so this test can run
custom_image = os.path.join(config.SECUREDROP_ROOT, "static/i/custom_logo.png")
default_image = os.path.join(config.SECUREDROP_ROOT, "static/i/logo.png")
if os.path.exists(default_image) and not os.path.exists(custom_image):
shutil.copyfile(default_image, custom_image)
with source_app.test_client() as app:
logo_url = get_logo_url(source_app)
assert logo_url.endswith('i/custom_logo.png')
response = app.get(logo_url, follow_redirects=False)
assert response.status_code == 200
def test_page_not_found(source_app):
"""Verify the page not found condition returns the intended template"""
with InstrumentedApp(source_app) as ins:
with source_app.test_client() as app:
resp = app.get('UNKNOWN')
assert resp.status_code == 404
ins.assert_template_used('notfound.html')
def test_orgname_default_set(source_app):
class dummy_current():
organization_name = None
with patch.object(InstanceConfig, 'get_current') as iMock:
with source_app.test_client() as app:
iMock.return_value = dummy_current()
resp = app.get(url_for('main.index'))
assert resp.status_code == 200
assert g.organization_name == "SecureDrop"
def test_index(source_app):
"""Test that the landing page loads and looks how we expect"""
with source_app.test_client() as app:
resp = app.get(url_for('main.index'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert 'First submission' in text
assert 'Return visit' in text
def _find_codename(html):
"""Find a source codename (diceware passphrase) in HTML"""
# Codenames may contain HTML escape characters, and the wordlist
# contains various symbols.
codename_re = (r'<mark [^>]*id="codename"[^>]*>'
r'(?P<codename>[a-z0-9 &#;?:=@_.*+()\'"$%!-]+)</mark>')
codename_match = re.search(codename_re, html)
assert codename_match is not None
return codename_match.group('codename')
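# Editor's note (illustration only): given markup such as
# '<mark class="codename" id="codename">alpha bravo charlie</mark>',
# _find_codename returns 'alpha bravo charlie'.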
def test_generate_already_logged_in(source_app):
with source_app.test_client() as app:
new_codename(app, session)
# Make sure it redirects to /lookup when logged in
resp = app.post(url_for('main.generate'), data=GENERATE_DATA)
assert resp.status_code == 302
# Make sure it flashes the message on the lookup page
resp = app.post(url_for('main.generate'), data=GENERATE_DATA, follow_redirects=True)
# Should redirect to /lookup
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "because you are already logged in." in text
def test_create_new_source(source_app):
with source_app.test_client() as app:
resp = app.post(url_for('main.generate'), data=GENERATE_DATA)
assert resp.status_code == 200
tab_id = next(iter(session['codenames'].keys()))
resp = app.post(url_for('main.create'), data={'tab_id': tab_id}, follow_redirects=True)
assert SessionManager.is_user_logged_in(db_session=db.session)
# should be redirected to /lookup
text = resp.data.decode('utf-8')
assert "Submit Files" in text
assert 'codenames' not in session
def test_generate(source_app):
with source_app.test_client() as app:
resp = app.post(url_for('main.generate'), data=GENERATE_DATA)
assert resp.status_code == 200
session_codename = next(iter(session['codenames'].values()))
text = resp.data.decode('utf-8')
assert "This codename is what you will use in future visits" in text
codename = _find_codename(resp.data.decode('utf-8'))
# codename is also stored in the session - make sure it matches the
# codename displayed to the source
assert codename == escape(session_codename)
def test_create_duplicate_codename_logged_in_not_in_session(source_app):
with patch.object(source_app.logger, 'error') as logger:
with source_app.test_client() as app:
resp = app.post(url_for('main.generate'), data=GENERATE_DATA)
assert resp.status_code == 200
tab_id, codename = next(iter(session['codenames'].items()))
# Create a source the first time
resp = app.post(url_for('main.create'), data={'tab_id': tab_id}, follow_redirects=True)
assert resp.status_code == 200
with source_app.test_client() as app:
# Attempt to add the same source
with app.session_transaction() as sess:
sess['codenames'] = {tab_id: codename}
sess["codenames_expire"] = datetime.utcnow() + timedelta(hours=1)
resp = app.post(url_for('main.create'), data={'tab_id': tab_id}, follow_redirects=True)
logger.assert_called_once()
assert "Could not create a source" in logger.call_args[0][0]
assert resp.status_code == 200
assert not SessionManager.is_user_logged_in(db_session=db.session)
def test_create_duplicate_codename_logged_in_in_session(source_app):
with source_app.test_client() as app:
# Given a user who generated a codename in a browser tab
resp = app.post(url_for('main.generate'), data=GENERATE_DATA)
assert resp.status_code == 200
first_tab_id, first_codename = list(session['codenames'].items())[0]
# And then they opened a new browser tab to generate a second codename
resp = app.post(url_for('main.generate'), data=GENERATE_DATA)
assert resp.status_code == 200
second_tab_id, second_codename = list(session['codenames'].items())[1]
assert first_codename != second_codename
# And the user then completed the account creation flow in the first tab
resp = app.post(
url_for('main.create'), data={'tab_id': first_tab_id}, follow_redirects=True
)
assert resp.status_code == 200
first_tab_account = SessionManager.get_logged_in_user(db_session=db.session)
# When the user tries to complete the account creation flow again, in the second tab
resp = app.post(
url_for('main.create'), data={'tab_id': second_tab_id}, follow_redirects=True
)
# Then the user is shown the "already logged in" message
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "You are already logged in." in text
# And no new account was created
second_tab_account = SessionManager.get_logged_in_user(db_session=db.session)
assert second_tab_account.filesystem_id == first_tab_account.filesystem_id
def test_lookup(source_app):
"""Test various elements on the /lookup page."""
with source_app.test_client() as app:
codename = new_codename(app, session)
resp = app.post(url_for('main.login'), data=dict(codename=codename),
follow_redirects=True)
# redirects to /lookup
text = resp.data.decode('utf-8')
assert "public key" in text
# download the public key
resp = app.get(url_for('info.download_public_key'))
text = resp.data.decode('utf-8')
assert "BEGIN PGP PUBLIC KEY BLOCK" in text
def test_journalist_key_redirects_to_public_key(source_app):
"""Test that the /journalist-key route redirects to /public-key."""
with source_app.test_client() as app:
resp = app.get(url_for('info.download_journalist_key'))
assert resp.status_code == 301
resp = app.get(url_for('info.download_journalist_key'), follow_redirects=True)
assert request.path == url_for('info.download_public_key')
assert "BEGIN PGP PUBLIC KEY BLOCK" in resp.data.decode('utf-8')
def test_login_and_logout(source_app):
with source_app.test_client() as app:
resp = app.get(url_for('main.login'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Enter Codename" in text
codename = new_codename(app, session)
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Submit Files" in text
assert SessionManager.is_user_logged_in(db_session=db.session)
with source_app.test_client() as app:
resp = app.post(url_for('main.login'),
data=dict(codename='invalid'),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert 'Sorry, that is not a recognized codename.' in text
assert not SessionManager.is_user_logged_in(db_session=db.session)
with source_app.test_client() as app:
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
assert SessionManager.is_user_logged_in(db_session=db.session)
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
assert SessionManager.is_user_logged_in(db_session=db.session)
resp = app.get(url_for('main.logout'),
follow_redirects=True)
assert not SessionManager.is_user_logged_in(db_session=db.session)
text = resp.data.decode('utf-8')
# This is part of the logout page message instructing users
# to click the 'New Identity' icon
assert 'This will clear your Tor Browser activity data' in text
def test_user_must_log_in_for_protected_views(source_app):
with source_app.test_client() as app:
resp = app.get(url_for('main.lookup'),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Enter Codename" in text
def test_login_with_whitespace(source_app):
"""
Test that codenames with leading or trailing whitespace still work
"""
def login_test(app, codename):
resp = app.get(url_for('main.login'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Enter Codename" in text
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Submit Files" in text
assert SessionManager.is_user_logged_in(db_session=db.session)
with source_app.test_client() as app:
codename = new_codename(app, session)
codenames = [
codename + ' ',
' ' + codename + ' ',
' ' + codename,
]
for codename_ in codenames:
with source_app.test_client() as app:
login_test(app, codename_)
def test_login_with_missing_reply_files(source_app, app_storage):
"""
Test that source can log in when replies are present in database but missing
from storage.
"""
source, codename = utils.db_helper.init_source(app_storage)
journalist, _ = utils.db_helper.init_journalist()
replies = utils.db_helper.reply(app_storage, journalist, source, 1)
assert len(replies) > 0
# Delete the reply file
reply_file_path = Path(app_storage.path(source.filesystem_id, replies[0].filename))
reply_file_path.unlink()
assert not reply_file_path.exists()
with source_app.test_client() as app:
resp = app.get(url_for('main.login'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Enter Codename" in text
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Submit Files" in text
assert SessionManager.is_user_logged_in(db_session=db.session)
def _dummy_submission(app):
"""
Helper to make a submission (content unimportant), mostly useful in
testing notification behavior for a source's first vs. their
subsequent submissions
"""
return app.post(
url_for('main.submit'),
data=dict(msg="Pay no attention to the man behind the curtain.",
fh=(BytesIO(b''), '')),
follow_redirects=True)
def test_initial_submission_notification(source_app):
"""
Regardless of the type of submission (message, file, or both), the
first submission is always greeted with a notification
reminding sources to check back later for replies.
"""
with source_app.test_client() as app:
new_codename(app, session)
resp = _dummy_submission(app)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Thank you for sending this information to us." in text
def test_submit_message(source_app):
with source_app.test_client() as app:
new_codename(app, session)
_dummy_submission(app)
resp = app.post(
url_for('main.submit'),
data=dict(msg="This is a test.", fh=(StringIO(''), '')),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Thanks! We received your message" in text
def test_submit_empty_message(source_app):
with source_app.test_client() as app:
new_codename(app, session)
resp = app.post(
url_for('main.submit'),
data=dict(msg="", fh=(StringIO(''), '')),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "You must enter a message or choose a file to submit." \
in text
def test_submit_big_message(source_app):
"""
Test the message size limit.
"""
with source_app.test_client() as app:
new_codename(app, session)
_dummy_submission(app)
resp = app.post(
url_for('main.submit'),
data=dict(msg="AA" * (1024 * 512), fh=(StringIO(''), '')),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Message text too long." in text
def test_submit_file(source_app):
with source_app.test_client() as app:
new_codename(app, session)
_dummy_submission(app)
resp = app.post(
url_for('main.submit'),
data=dict(msg="", fh=(BytesIO(b'This is a test'), 'test.txt')),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert 'Thanks! We received your document' in text
def test_submit_both(source_app):
with source_app.test_client() as app:
new_codename(app, session)
_dummy_submission(app)
resp = app.post(
url_for('main.submit'),
data=dict(
msg="This is a test",
fh=(BytesIO(b'This is a test'), 'test.txt')),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Thanks! We received your message and document" in text
def test_submit_antispam(source_app):
"""
Test the antispam check.
"""
with source_app.test_client() as app:
new_codename(app, session)
_dummy_submission(app)
resp = app.post(
url_for('main.submit'),
data=dict(msg="Test", fh=(StringIO(''), ''), text="blah"),
follow_redirects=True)
assert resp.status_code == 403
def test_delete_all_successfully_deletes_replies(source_app, app_storage):
with source_app.app_context():
journalist, _ = utils.db_helper.init_journalist()
source, codename = utils.db_helper.init_source(app_storage)
source_id = source.id
utils.db_helper.reply(app_storage, journalist, source, 1)
with source_app.test_client() as app:
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
resp = app.post(url_for('main.batch_delete'), follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "All replies have been deleted" in text
with source_app.app_context():
source = Source.query.get(source_id)
replies = Reply.query.filter(Reply.source_id == source_id).all()
for reply in replies:
assert reply.deleted_by_source is True
def test_delete_all_replies_deleted_by_source_but_not_journalist(source_app, app_storage):
"""Replies can be deleted by a source, but not by journalists. As such,
replies may still exist in the replies table, but no longer be visible."""
with source_app.app_context():
journalist, _ = utils.db_helper.init_journalist()
source, codename = utils.db_helper.init_source(app_storage)
utils.db_helper.reply(app_storage, journalist, source, 1)
replies = Reply.query.filter(Reply.source_id == source.id).all()
for reply in replies:
reply.deleted_by_source = True
db.session.add(reply)
db.session.commit()
with source_app.test_client() as app:
with patch.object(source_app.logger, 'error') as logger:
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
resp = app.post(url_for('main.batch_delete'),
follow_redirects=True)
assert resp.status_code == 200
logger.assert_called_once_with(
"Found no replies when at least one was expected"
)
def test_delete_all_replies_already_deleted_by_journalists(source_app, app_storage):
with source_app.app_context():
journalist, _ = utils.db_helper.init_journalist()
source, codename = utils.db_helper.init_source(app_storage)
# Note that we are creating the source and no replies
with source_app.test_client() as app:
with patch.object(source_app.logger, 'error') as logger:
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
resp = app.post(url_for('main.batch_delete'),
follow_redirects=True)
assert resp.status_code == 200
logger.assert_called_once_with(
"Found no replies when at least one was expected"
)
def test_submit_sanitizes_filename(source_app):
"""Test that upload file name is sanitized"""
insecure_filename = '../../bin/gpg'
sanitized_filename = 'bin_gpg'
with patch.object(gzip, 'GzipFile', wraps=gzip.GzipFile) as gzipfile:
with source_app.test_client() as app:
new_codename(app, session)
resp = app.post(
url_for('main.submit'),
data=dict(
msg="",
fh=(BytesIO(b'This is a test'), insecure_filename)),
follow_redirects=True)
assert resp.status_code == 200
gzipfile.assert_called_with(filename=sanitized_filename,
mode=ANY,
fileobj=ANY,
mtime=0)
@pytest.mark.parametrize("test_url", ['main.index', 'main.create', 'main.submit'])
def test_redirect_when_tor2web(config, source_app, test_url):
with source_app.test_client() as app:
resp = app.get(
url_for(test_url),
headers=[('X-tor2web', 'encrypted')],
follow_redirects=True)
text = resp.data.decode('utf-8')
assert resp.status_code == 403
assert "Proxy Service Detected" in text
def test_tor2web_warning(source_app):
with source_app.test_client() as app:
resp = app.get(url_for('info.tor2web_warning'))
assert resp.status_code == 403
text = resp.data.decode('utf-8')
assert "Proxy Service Detected" in text
def test_why_use_tor_browser(source_app):
with source_app.test_client() as app:
resp = app.get(url_for('info.recommend_tor_browser'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "You Should Use Tor Browser" in text
def test_why_journalist_key(source_app):
with source_app.test_client() as app:
resp = app.get(url_for('info.why_download_public_key'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Why download the team's public key?" in text
def test_metadata_route(config, source_app):
with patch("server_os.get_os_release", return_value="20.04"):
with source_app.test_client() as app:
resp = app.get(url_for('api.metadata'))
assert resp.status_code == 200
assert resp.headers.get('Content-Type') == 'application/json'
assert resp.json.get('allow_document_uploads') ==\
InstanceConfig.get_current().allow_document_uploads
assert resp.json.get('sd_version') == version.__version__
assert resp.json.get('server_os') == '20.04'
assert resp.json.get('supported_languages') ==\
config.SUPPORTED_LOCALES
assert resp.json.get('v3_source_url') is None
def test_metadata_v3_url(source_app):
onion_test_url = "abcdefghabcdefghabcdefghabcdefghabcdefghabcdefghabcdefgh.onion"
with patch.object(source_app_api, "get_sourcev3_url") as mocked_v3_url:
mocked_v3_url.return_value = (onion_test_url)
with source_app.test_client() as app:
resp = app.get(url_for('api.metadata'))
assert resp.status_code == 200
assert resp.headers.get('Content-Type') == 'application/json'
assert resp.json.get('v3_source_url') == onion_test_url
def test_login_with_overly_long_codename(source_app):
"""Attempting to login with an overly long codename should result in
an error to avoid DoS."""
overly_long_codename = 'a' * (PassphraseGenerator.MAX_PASSPHRASE_LENGTH + 1)
with source_app.test_client() as app:
resp = app.post(url_for('main.login'),
data=dict(codename=overly_long_codename),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert ("Field must be between 1 and {} characters long."
.format(PassphraseGenerator.MAX_PASSPHRASE_LENGTH)) in text
def test_normalize_timestamps(source_app, app_storage):
"""
Check function of source_app.utils.normalize_timestamps.
All submissions for a source should have the same timestamp. Any
existing submissions' files that did not exist at the time of a
new submission should not be created by normalize_timestamps.
"""
with source_app.test_client() as app:
# create a source
source, codename = utils.db_helper.init_source(app_storage)
# create one submission
first_submission = submit(app_storage, source, 1)[0]
# delete the submission's file from the store
first_submission_path = Path(
app_storage.path(source.filesystem_id, first_submission.filename)
)
first_submission_path.unlink()
assert not first_submission_path.exists()
# log in as the source
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Submit Files" in text
assert SessionManager.is_user_logged_in(db_session=db.session)
# submit another message
resp = _dummy_submission(app)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Thanks! We received your message" in text
# sleep to ensure timestamps would differ
time.sleep(1)
# submit another message
resp = _dummy_submission(app)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Thanks! We received your message" in text
# only two of the source's three submissions should have files in the store
assert 3 == len(source.submissions)
submission_paths = [
Path(app_storage.path(source.filesystem_id, s.filename))
for s in source.submissions
]
extant_paths = [p for p in submission_paths if p.exists()]
assert 2 == len(extant_paths)
# verify that the deleted file has not been recreated
assert not first_submission_path.exists()
assert first_submission_path not in extant_paths
# and the timestamps of all existing files should match exactly
assert extant_paths[0].stat().st_atime_ns == extant_paths[1].stat().st_atime_ns
assert extant_paths[0].stat().st_ctime_ns == extant_paths[1].stat().st_ctime_ns
assert extant_paths[0].stat().st_mtime_ns == extant_paths[1].stat().st_mtime_ns
def test_failed_normalize_timestamps_logs_warning(source_app):
"""If a normalize timestamps event fails, the subprocess that calls
touch will fail and exit 1. When this happens, the submission should
still occur, but a warning should be logged (this will trigger an
OSSEC alert)."""
with patch.object(source_app.logger, 'warning') as logger:
with patch.object(subprocess, 'call', return_value=1):
with source_app.test_client() as app:
new_codename(app, session)
_dummy_submission(app)
resp = app.post(
url_for('main.submit'),
data=dict(
msg="This is a test.",
fh=(StringIO(''), '')),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Thanks! We received your message" in text
logger.assert_called_once_with(
"Couldn't normalize submission "
"timestamps (touch exited with 1)"
)
def test_source_is_deleted_while_logged_in(source_app):
"""If a source is deleted by a journalist when they are logged in,
a NoResultFound will occur. The source should be redirected to the
index when this happens, and a warning logged."""
with source_app.test_client() as app:
codename = new_codename(app, session)
app.post('login', data=dict(codename=codename), follow_redirects=True)
# Now that the source is logged in, the journalist deletes the source
source_user = SessionManager.get_logged_in_user(db_session=db.session)
delete_collection(source_user.filesystem_id)
# Source attempts to continue to navigate
resp = app.get(url_for('main.lookup'), follow_redirects=True)
assert resp.status_code == 200
assert not SessionManager.is_user_logged_in(db_session=db.session)
text = resp.data.decode('utf-8')
assert 'First submission' in text
assert not SessionManager.is_user_logged_in(db_session=db.session)
def test_login_with_invalid_codename(source_app):
"""Logging in with a codename with invalid characters should return
an informative message to the user."""
invalid_codename = '[]'
with source_app.test_client() as app:
resp = app.post(url_for('main.login'),
data=dict(codename=invalid_codename),
follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Invalid input." in text
def test_source_session_expiration(source_app):
with source_app.test_client() as app:
# Given a source user who logs in
codename = new_codename(app, session)
resp = app.post(url_for('main.login'),
data=dict(codename=codename),
follow_redirects=True)
assert resp.status_code == 200
# But we're now 6 hours later hence their session expired
with mock.patch("source_app.session_manager.datetime") as mock_datetime:
six_hours_later = datetime.now(timezone.utc) + timedelta(hours=6)
mock_datetime.now.return_value = six_hours_later
# When they browse to an authenticated page
resp = app.get(url_for('main.lookup'), follow_redirects=True)
# They get redirected to the index page with the "logged out" message
text = resp.data.decode('utf-8')
assert 'You were logged out due to inactivity' in text
def test_source_session_expiration_create(source_app):
with source_app.test_client() as app:
# Given a source user who is in the middle of the account creation flow
resp = app.post(url_for('main.generate'), data=GENERATE_DATA)
assert resp.status_code == 200
# But we're now 6 hours later hence they did not finish the account creation flow in time
with mock.patch("source_app.main.datetime") as mock_datetime:
six_hours_later = datetime.now(timezone.utc) + timedelta(hours=6)
mock_datetime.now.return_value = six_hours_later
# When the user tries to complete the create flow
resp = app.post(url_for('main.create'), follow_redirects=True)
# They get redirected to the index page with the "logged out" message
text = resp.data.decode('utf-8')
assert 'You were logged out due to inactivity' in text
def test_source_no_session_expiration_message_when_not_logged_in(source_app):
with source_app.test_client() as app:
# Given an unauthenticated source user
resp = app.get(url_for('main.index'))
assert resp.status_code == 200
# And their session expired
with mock.patch("source_app.session_manager.datetime") as mock_datetime:
six_hours_later = datetime.utcnow() + timedelta(hours=6)
mock_datetime.now.return_value = six_hours_later
# When they browse again the index page
refreshed_resp = app.get(url_for('main.index'), follow_redirects=True)
# The session expiration message is NOT displayed
text = refreshed_resp.data.decode('utf-8')
assert 'You were logged out due to inactivity' not in text
def test_csrf_error_page(source_app):
source_app.config['WTF_CSRF_ENABLED'] = True
with source_app.test_client() as app:
with InstrumentedApp(source_app) as ins:
resp = app.post(url_for('main.create'))
ins.assert_redirects(resp, url_for('main.index'))
resp = app.post(url_for('main.create'), follow_redirects=True)
text = resp.data.decode('utf-8')
assert 'You were logged out due to inactivity' in text
def test_source_can_only_delete_own_replies(source_app, app_storage):
    '''This test checks for a bug where an authenticated source A could delete
    replies sent to source B by "guessing" the filename.
'''
source0, codename0 = utils.db_helper.init_source(app_storage)
source1, codename1 = utils.db_helper.init_source(app_storage)
journalist, _ = utils.db_helper.init_journalist()
replies = utils.db_helper.reply(app_storage, journalist, source0, 1)
filename = replies[0].filename
confirmation_msg = 'Reply deleted'
with source_app.test_client() as app:
resp = app.post(url_for('main.login'),
data={'codename': codename1},
follow_redirects=True)
assert resp.status_code == 200
assert SessionManager.get_logged_in_user(db_session=db.session).db_record_id == source1.id
resp = app.post(url_for('main.delete'),
data={'reply_filename': filename},
follow_redirects=True)
assert resp.status_code == 404
assert confirmation_msg not in resp.data.decode('utf-8')
reply = Reply.query.filter_by(filename=filename).one()
assert not reply.deleted_by_source
with source_app.test_client() as app:
resp = app.post(url_for('main.login'),
data={'codename': codename0},
follow_redirects=True)
assert resp.status_code == 200
assert SessionManager.get_logged_in_user(db_session=db.session).db_record_id == source0.id
resp = app.post(url_for('main.delete'),
data={'reply_filename': filename},
follow_redirects=True)
assert resp.status_code == 200
assert confirmation_msg in resp.data.decode('utf-8')
reply = Reply.query.filter_by(filename=filename).one()
assert reply.deleted_by_source
def test_robots_txt(source_app):
"""Test that robots.txt works"""
with source_app.test_client() as app:
# Not using url_for here because we care about the actual URL path
resp = app.get('/robots.txt')
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert 'Disallow: /' in text
|
freedomofpress/securedrop
|
securedrop/tests/test_source.py
|
Python
|
agpl-3.0
| 35,531
|
[
"VisIt"
] |
e76f391ca20a4c564a35d708678fef915e83f3adb9a6868f06839a6f328cd4ac
|
#!/usr/bin/env python
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
from pyscf import gto
from pyscf import scf
'''
Density fitting method enabled by decorating the scf object with the scf.density_fit function.
There is no flag that tells the program to use density fitting for the 2-electron
integrals. The way to enable density fitting is to decorate an existing scf
object with the scf.density_fit function.
NOTE scf.density_fit generates a new object, which works exactly the
same way as the regular scf method. The density fitting scf object is
independent of the regular scf object being decorated. Because of this,
density fitting can be applied anytime, anywhere in your script
without affecting the existing scf object.
See also:
examples/df/00-with_df.py
examples/df/01-auxbasis.py
'''
mol = gto.Mole()
mol.build(
verbose = 0,
atom = '''8 0 0. 0
1 0 -0.757 0.587
1 0 0.757 0.587''',
basis = 'ccpvdz',
)
mf = scf.density_fit(scf.RHF(mol))
energy = mf.kernel()
print('E = %.12f, ref = -76.026744737355' % energy)
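#
# Illustrative check (not from the original example): scf.density_fit returned a
# new object above, so a plain RHF object built here should be unaffected by
# that decoration.
#
mf_plain = scf.RHF(mol)
print('Plain RHF E = %.12f (no density fitting)' % mf_plain.kernel())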
#
# Stream style: calling .density_fit method to return a DF-SCF object.
#
mf = scf.RHF(mol).density_fit()
energy = mf.kernel()
print('E = %.12f, ref = -76.026744737355' % energy)
#
# By default, an optimal auxiliary basis (if available) or even-tempered Gaussian
# functions are used as the fitting basis. You can assign with_df.auxbasis to
# change the fitting basis.
#
mol.spin = 1
mol.charge = 1
mol.build(0, 0)
mf = scf.UKS(mol).density_fit()
mf.with_df.auxbasis = 'cc-pvdz-jkfit'
energy = mf.kernel()
print('E = %.12f, ref = -75.390366559552' % energy)
# Switch off density fitting
mf.with_df = False
energy = mf.kernel()
print('E = %.12f, ref = %.12f' % (energy, scf.UKS(mol).kernel()))
|
gkc1000/pyscf
|
examples/scf/20-density_fitting.py
|
Python
|
apache-2.0
| 1,784
|
[
"Gaussian",
"PySCF"
] |
807097fd11fc17b15170f246cf66ae4c34cd18aa73c2e68c7ed8856ad0b60c15
|
# Copyright (C) 2013-2015 MetaMorph Software, Inc
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
# =======================
# This version of the META tools is a fork of an original version produced
# by Vanderbilt University's Institute for Software Integrated Systems (ISIS).
# Their license statement:
# Copyright (C) 2011-2014 Vanderbilt University
# Developed with the sponsorship of the Defense Advanced Research Projects
# Agency (DARPA) and delivered to the U.S. Government with Unlimited Rights
# as defined in DFARS 252.227-7013.
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this data, including any software or models in source or binary
# form, as well as any drawings, specifications, and documentation
# (collectively "the Data"), to deal in the Data without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Data, and to
# permit persons to whom the Data is furnished to do so, subject to the
# following conditions:
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Data.
# THE DATA IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS, SPONSORS, DEVELOPERS, CONTRIBUTORS, OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE DATA OR THE USE OR OTHER DEALINGS IN THE DATA.
## Chelsea He and Emily Clements, MIT
## estimate_complexity.py
## June 26, 2012
##
## This function estimates the complexity of a random variable Z
## based on either the pdf of Z or samples of Z. The complexity
## metric used is exponential entropy as defined by Campbell (1966)
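##
## In this metric the differential entropy of Z with density f is
##   H(Z) = -integral f(z) * log(f(z)) dz
## and the reported complexity is exp(H(Z)); the functions below approximate
## the integral with a sum over discrete bins of width binsize.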
# Import libraries
import math
from numpy import *
import scipy.stats.kde as kde
from scipy.stats import norm
def with_distribution(dist,limits,mean,variance,numbins):
if limits[0] > -inf:
lb = limits[0]-3*math.sqrt(variance)
else:
lb = mean-5*math.sqrt(variance)
if limits[1] < inf:
ub = limits[1]+3*math.sqrt(variance)
else:
ub = mean+5*math.sqrt(variance)
bins = linspace(lb,ub,numbins)
# Generate Gaussian pdf
f_z = norm.pdf(bins, mean, math.sqrt(variance))
# Estimate complexity based on pdf
return with_pdf(bins,f_z)
############### Method I: supply pdf ###############
def with_pdf(bins,f_z):
# Compute bin size
binsize = bins[1]-bins[0]
# Initialize entropy value with log(binsize) -- correction term for discretizing pdf
entsum = log(binsize)
# Compute differential entropy and complexity
for fz in f_z:
# Consider only terms where f_z > 0 (otherwise log(f_z) --> log(0) will cause trouble)
if fz*binsize > 1e-320:
entsum = entsum - fz*binsize*log(fz*binsize)
entropy = entsum
complexity = exp(entropy)
return complexity
####### Method II: supply Monte Carlo samples #######
def with_samples(Z,numbins):
# Turn list into array
z = array(Z)
# Density estimation, discretized into bins
bins = linspace(min(z),max(z),numbins)
binsize = bins[1]-bins[0]
f_z = kde.gaussian_kde(z).evaluate(bins)
# Initialize entropy value with log(binsize) -- correction term for discretizing pdf
entsum = log(binsize)
# Compute differential entropy and complexity
for fz in f_z:
# Consider only terms where f_z > 0 (otherwise log(f_z) --> log(0) will cause trouble)
if fz > 0:
entsum = entsum - fz*binsize*log(fz*binsize)
entropy = entsum
complexity = exp(entropy)
return complexity
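# A minimal usage sketch (illustrative only; the sample size and bin count are
# arbitrary). For a unit-variance Gaussian the exponential entropy is
# exp(0.5*log(2*pi*e)), roughly 4.13, and both estimators should come out close
# to that value.
if __name__ == '__main__':
    import numpy as _np
    _samples = _np.random.normal(0.0, 1.0, 10000).tolist()
    print(with_samples(_samples, 200))
    # `dist` is unused inside with_distribution, so None is passed here.
    print(with_distribution(None, [-inf, inf], 0.0, 1.0, 200))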
|
pombredanne/metamorphosys-desktop
|
metamorphosys/META/src/Python27Packages/PCC/PCC/estimate_complexity.py
|
Python
|
mit
| 5,167
|
[
"Gaussian"
] |
e71c7bfa4739c6694ccfbf137568ca81dab2e59269acc30da828eb6449b87427
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: TTbar_Tauola_13TeV_cfi.py --conditions auto:startup -n 1000 --eventcontent FEVTDEBUG --relval 9000,100 -s GEN,SIM --datatier GEN-SIM --no_exec
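# (Illustrative reconstruction: a configuration like this is typically produced
#  by a cmsDriver.py call of roughly the form
#    cmsDriver.py TTbar_Tauola_13TeV_cfi.py --conditions auto:startup -n 1000 \
#      --eventcontent FEVTDEBUG --relval 9000,100 -s GEN,SIM --datatier GEN-SIM --no_exec
#  using only the options already listed in this header.)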
import FWCore.ParameterSet.Config as cms
process = cms.Process('SIM')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.Geometry.GeometrySimDB_cff')
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic8TeVCollision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.SimIdeal_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1000)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
version = cms.untracked.string('$Revision: 1.19 $'),
annotation = cms.untracked.string('TTbar_Tauola_13TeV_cfi.py nevts:1000'),
name = cms.untracked.string('Applications')
)
# Output definition
process.FEVTDEBUGoutput = cms.OutputModule("PoolOutputModule",
splitLevel = cms.untracked.int32(0),
eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
outputCommands = process.FEVTDEBUGEventContent.outputCommands,
fileName = cms.untracked.string('TTbar_Tauola_13TeV_cfi_py_GEN_SIM.root'),
dataset = cms.untracked.PSet(
filterName = cms.untracked.string(''),
dataTier = cms.untracked.string('GEN-SIM')
),
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
)
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:startup', '')
process.generator = cms.EDFilter("Pythia6GeneratorFilter",
ExternalDecays = cms.PSet(
Tauola = cms.untracked.PSet(
UseTauolaPolarization = cms.bool(True),
InputCards = cms.PSet(
mdtau = cms.int32(0),
pjak2 = cms.int32(0),
pjak1 = cms.int32(0)
)
),
parameterSets = cms.vstring('Tauola')
),
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(1.0),
pythiaHepMCVerbosity = cms.untracked.bool(False),
comEnergy = cms.double(13000.0),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythiaUESettings = cms.vstring('MSTU(21)=1 ! Check on possible errors during program execution',
'MSTJ(22)=2 ! Decay those unstable particles',
'PARJ(71)=10 . ! for which ctau 10 mm',
'MSTP(33)=0 ! no K factors in hard cross sections',
'MSTP(2)=1 ! which order running alphaS',
'MSTP(51)=10042 ! structure function chosen (external PDF CTEQ6L1)',
'MSTP(52)=2 ! work with LHAPDF',
'PARP(82)=1.921 ! pt cutoff for multiparton interactions',
'PARP(89)=1800. ! sqrts for which PARP82 is set',
'PARP(90)=0.227 ! Multiple interactions: rescaling power',
'MSTP(95)=6 ! CR (color reconnection parameters)',
'PARP(77)=1.016 ! CR',
'PARP(78)=0.538 ! CR',
'PARP(80)=0.1 ! Prob. colored parton from BBR',
'PARP(83)=0.356 ! Multiple interactions: matter distribution parameter',
'PARP(84)=0.651 ! Multiple interactions: matter distribution parameter',
'PARP(62)=1.025 ! ISR cutoff',
'MSTP(91)=1 ! Gaussian primordial kT',
'PARP(93)=10.0 ! primordial kT-max',
'MSTP(81)=21 ! multiple parton interactions 1 is Pythia default',
'MSTP(82)=4 ! Defines the multi-parton model'),
processParameters = cms.vstring('MSEL = 0 ! User defined processes',
'MSUB(81) = 1 ! qqbar to QQbar',
'MSUB(82) = 1 ! gg to QQbar',
'MSTP(7) = 6 ! flavour = top',
'PMAS(6,1) = 175. ! top quark mass'),
parameterSets = cms.vstring('pythiaUESettings',
'processParameters')
)
)
# Path and EndPath definitions
process.generation_step = cms.Path(process.pgen)
process.simulation_step = cms.Path(process.psim)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.FEVTDEBUGoutput_step = cms.EndPath(process.FEVTDEBUGoutput)
# Schedule definition
process.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.endjob_step,process.FEVTDEBUGoutput_step)
# filter all path with the production filter sequence
for path in process.paths:
getattr(process,path)._seq = process.generator * getattr(process,path)._seq
|
rovere/productions
|
TTbar_Tauola_13TeV_cfi_py_GEN_SIM.py
|
Python
|
gpl-3.0
| 5,681
|
[
"Gaussian"
] |
63b32047610beec05c2382795f7b25d1fafedba0e4d23a888c61d3ab021e7c08
|
"""
# Copyright (C) 2007 Rob King (rob@e-mu.org)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# For questions regarding this module contact
# Rob King <rob@e-mu.org> or visit http://www.e-mu.org
#
LiveTelnet is a very simple telnet server that works in the version of Python
included in Ableton Live. To install, first make sure you have installed
Python 2.2.x in c:\Python22 (we use some of its modules which are not included
in Ableton's version). Next, place this whole directory inside the MIDI Remote
Scripts directory of Ableton:
e.g. C:\Program Files\Ableton\Live 6.0.7\Resources\MIDI Remote Scripts
When you load up Ableton you should find the LiveAPI control surface listed
in Preferences > MIDI/Sync, select it.
Now you can use the telnet client of your choice to telnet to localhost port 23
where you will get an Interactive Python interpreter.
To get started quickly take a look in LiveUtils.py
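
A minimal client-side sketch (illustrative only, not part of this module): from
any Python interpreter on the same machine you can talk to the interpreter
exposed here using the standard telnetlib module, e.g.

    import telnetlib
    tn = telnetlib.Telnet('localhost', 23)
    tn.read_until('>>> ')
    tn.write('print 2 + 2\r\n')
    print tn.read_until('>>> ')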
"""
import Live
import sys, StringIO, socket, code
from _LiveAPICore import LiveUtils
class LiveTelnet:
__module__ = __name__
__doc__ = "Main class that establishes the Live Telnet"
def __init__(self, c_instance):
self._LiveTelnet__c_instance = c_instance
self.originalstdin = sys.stdin
self.originalstdout = sys.stdout
self.originalstderr = sys.stderr
self.stdin = StringIO.StringIO()
self.stdout = StringIO.StringIO()
self.stderr = StringIO.StringIO()
self.telnetSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.telnetSocket.bind( ('', 23) )
self.telnetSocket.setblocking(False)
self.telnetSocket.listen(1)
self.telnetConnection = None
self.interpreter = code.InteractiveConsole(globals())
self.telnetBuffer = ""
self.lastData = ""
self.commandBuffer = []
def disconnect(self):
#Be nice and return stdio to their original owners
sys.stdin = self.originalstdin
sys.stdout = self.originalstdout
sys.stderr = self.originalstderr
self.telnetSocket.close()
def connect_script_instances(self, instanciated_scripts):
"""
Called by the Application as soon as all scripts are initialized.
You can connect yourself to other running scripts here, as we do it
connect the extension modules
"""
return
def application(self):
"""returns a reference to the application that we are running in"""
return Live.Application.get_application()
def song(self):
"""returns a reference to the Live Song that we do interact with"""
return self._LiveTelnet__c_instance.song()
def handle(self):
"""returns a handle to the c_interface that is needed when forwarding MIDI events via the MIDI map"""
return self._LiveTelnet__c_instance.handle()
def refresh_state(self):
"""I'm sure this does something useful.."""
return
def is_extension(self):
return False
def request_rebuild_midi_map(self):
"""
To be called from any components, as soon as their internal state changed in a
way, that we do need to remap the mappings that are processed directly by the
Live engine.
Dont assume that the request will immediately result in a call to
your build_midi_map function. For performance reasons this is only
called once per GUI frame.
"""
return
def build_midi_map(self, midi_map_handle):
"""
New MIDI mappings can only be set when the scripts 'build_midi_map' function
is invoked by our C instance sibling. Its either invoked when we have requested it
(see 'request_rebuild_midi_map' above) or when due to a change in Lives internal state,
a rebuild is needed.
"""
return
def update_display(self):
#Updates every 100ms
#Keep trying to accept a connection until someone actually connects
if not self.telnetConnection:
try:
#Does anyone want to connect?
self.telnetConnection, self.addr = self.telnetSocket.accept()
except:
#No one connected in this iteration
pass
else:
#Yay! Someone connected! Send them the banner and first prompt.
self.telnetConnection.send("Welcome to the Ableton Live Python Interpreter (Python 2.2.1)\r\n")
                self.telnetConnection.send("Brought to you by LiveAPI.org\r\n")
self.telnetConnection.send(">>> ")
else:
#Someone's connected, so lets interact with them.
try:
#If the client has typed anything, get it
data = self.telnetConnection.recv(1)
except:
#Nope they haven't typed anything yet
                data = ""
#If return is pressed, process the command (This if statement is so ugly because ableton python doesn't have universal newlines)
if (data == "\n" or data == "\r") and (self.lastData != "\n" and self.lastData != "\r"):
                continues = self.interpreter.push(self.telnetBuffer.rstrip()) #should be strip("\r\n") but Ableton's Python throws an error
self.commandBuffer.append(self.telnetBuffer.rstrip())
self.telnetBuffer = ""
#if the user input is multi-line, continue, otherwise return the results
if continues:
self.telnetConnection.send("... ")
else:
#return stdout to the client
self.telnetConnection.send(self.stdout.getvalue().replace("\n","\r\n"))
#return stderr to the client
self.telnetConnection.send(self.stderr.getvalue().replace("\n","\r\n"))
self.telnetConnection.send(">>> ")
#Empty buffers by creating new stringIO objects
#There's probably a better way to empty these
self.stdin.close()
self.stdout.close()
self.stderr.close()
self.stdin = StringIO.StringIO()
self.stdout = StringIO.StringIO()
self.stderr = StringIO.StringIO()
#re-redirect the stdio
sys.stdin = self.stdin
sys.stdout = self.stdout
sys.stderr = self.stderr
elif data == "\b": #deals with backspaces
if len(self.telnetBuffer):
self.telnetBuffer = self.telnetBuffer[:-1]
self.telnetConnection.send(" \b") #deletes the character on the console
else:
self.telnetConnection.send(" ")
elif data != "\n" and data != "\r":
self.telnetBuffer = self.telnetBuffer + data
self.lastData = data
def send_midi(self, midi_event_bytes):
"""
Use this function to send MIDI events through Live to the _real_ MIDI devices
that this script is assigned to.
"""
pass
def receive_midi(self, midi_bytes):
return
def can_lock_to_devices(self):
return False
def suggest_input_port(self):
return ''
def suggest_output_port(self):
return ''
def suggest_map_mode(self, cc_no):
result = Live.MidiMap.MapMode.absolute
if (cc_no in range(FID_PANNING_BASE, (FID_PANNING_BASE + NUM_CHANNEL_STRIPS))):
result = Live.MidiMap.MapMode.relative_signed_bit
return result
def __handle_display_switch_ids(self, switch_id, value):
pass
|
derivativeinc/liveapi
|
src/LiveTelnet/LiveTelnet.py
|
Python
|
lgpl-2.1
| 8,633
|
[
"VisIt"
] |
55cb6bfe5814104bc3178e3eaafd51c9e746770f20afb27061763a15875d8b1c
|
# $Id: ShowFeats.py 537 2007-08-20 14:54:35Z landrgr1 $
#
# Created by Greg Landrum Aug 2006
#
#
_version = "0.3.2"
_usage = """
ShowFeats [optional args] <filenames>
if "-" is provided as a filename, data will be read from stdin (the console)
"""
_welcomeMessage = "This is ShowFeats version %s" % (_version)
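# An illustrative invocation (the file names below are hypothetical):
#   python ShowFeats.py --writeFeats --mapFile feats.fmap mols.sdf
# assuming a PyMOL server is already running and reachable.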
import math
#set up the logger:
from rdkit import RDLogger as logging
logger = logging.logger()
logger.setLevel(logging.INFO)
from rdkit import Geometry
from rdkit.Chem.Features import FeatDirUtilsRD as FeatDirUtils
_featColors = {
'Donor': (0, 1, 1),
'Acceptor': (1, 0, 1),
'NegIonizable': (1, 0, 0),
'PosIonizable': (0, 0, 1),
'ZnBinder': (1, .5, .5),
'Aromatic': (1, .8, .2),
'LumpedHydrophobe': (.5, .25, 0),
'Hydrophobe': (.5, .25, 0),
}
def _getVectNormal(v, tol=1e-4):
if math.fabs(v.x) > tol:
res = Geometry.Point3D(v.y, -v.x, 0)
elif math.fabs(v.y) > tol:
res = Geometry.Point3D(-v.y, v.x, 0)
elif math.fabs(v.z) > tol:
res = Geometry.Point3D(1, 0, 0)
else:
raise ValueError('cannot find normal to the null vector')
res.Normalize()
return res
_canonArrowhead = None
def _buildCanonArrowhead(headFrac, nSteps, aspect):
global _canonArrowhead
  startP = Geometry.Point3D(0, 0, headFrac)
  _canonArrowhead = [startP]
  scale = headFrac * aspect
  baseV = Geometry.Point3D(scale, 0, 0)
  _canonArrowhead.append(baseV)
  twopi = 2 * math.pi
  for i in range(1, nSteps):
    v = Geometry.Point3D(scale * math.cos(i * twopi / nSteps), scale * math.sin(i * twopi / nSteps), 0)
_canonArrowhead.append(v)
_globalArrowCGO = []
_globalSphereCGO = []
# taken from pymol's cgo.py
BEGIN = 2
END = 3
TRIANGLE_FAN = 6
COLOR = 6
VERTEX = 4
NORMAL = 5
SPHERE = 7
CYLINDER = 9
ALPHA = 25
def _cgoArrowhead(viewer, tail, head, radius, color, label, headFrac=0.3, nSteps=10, aspect=.5):
global _globalArrowCGO
delta = head - tail
normal = _getVectNormal(delta)
delta.Normalize()
dv = head - tail
dv.Normalize()
dv *= headFrac
startP = head
normal *= headFrac * aspect
cgo = [BEGIN, TRIANGLE_FAN, COLOR, color[0], color[1], color[2], NORMAL, dv.x, dv.y, dv.z, VERTEX,
head.x + dv.x, head.y + dv.y, head.z + dv.z]
base = [BEGIN, TRIANGLE_FAN, COLOR, color[0], color[1], color[2], NORMAL, -dv.x, -dv.y, -dv.z,
VERTEX, head.x, head.y, head.z]
v = startP + normal
cgo.extend([NORMAL, normal.x, normal.y, normal.z])
cgo.extend([VERTEX, v.x, v.y, v.z])
base.extend([VERTEX, v.x, v.y, v.z])
for i in range(1, nSteps):
v = FeatDirUtils.ArbAxisRotation(360. / nSteps * i, delta, normal)
cgo.extend([NORMAL, v.x, v.y, v.z])
v += startP
cgo.extend([VERTEX, v.x, v.y, v.z])
base.extend([VERTEX, v.x, v.y, v.z])
cgo.extend([NORMAL, normal.x, normal.y, normal.z])
cgo.extend([VERTEX, startP.x + normal.x, startP.y + normal.y, startP.z + normal.z])
base.extend([VERTEX, startP.x + normal.x, startP.y + normal.y, startP.z + normal.z])
cgo.append(END)
base.append(END)
cgo.extend(base)
#viewer.server.renderCGO(cgo,label)
_globalArrowCGO.extend(cgo)
def ShowArrow(viewer, tail, head, radius, color, label, transparency=0, includeArrowhead=True):
global _globalArrowCGO
if transparency:
_globalArrowCGO.extend([ALPHA, 1 - transparency])
else:
_globalArrowCGO.extend([ALPHA, 1])
_globalArrowCGO.extend([CYLINDER,
tail.x,
tail.y,
tail.z,
head.x,
head.y,
head.z,
radius * .10,
color[0],
color[1],
color[2],
color[0],
color[1],
color[2], ])
if includeArrowhead:
_cgoArrowhead(viewer, tail, head, radius, color, label)
def ShowMolFeats(mol, factory, viewer, radius=0.5, confId=-1, showOnly=True, name='',
transparency=0.0, colors=None, excludeTypes=[], useFeatDirs=True, featLabel=None,
dirLabel=None, includeArrowheads=True, writeFeats=False, showMol=True,
featMapFile=False):
global _globalSphereCGO
if not name:
if mol.HasProp('_Name'):
name = mol.GetProp('_Name')
else:
name = 'molecule'
if not colors:
colors = _featColors
if showMol:
viewer.ShowMol(mol, name=name, showOnly=showOnly, confId=confId)
molFeats = factory.GetFeaturesForMol(mol)
if not featLabel:
featLabel = f'{name}-feats'
viewer.server.resetCGO(featLabel)
if not dirLabel:
dirLabel = featLabel + "-dirs"
viewer.server.resetCGO(dirLabel)
for feat in molFeats:
family = feat.GetFamily()
if family in excludeTypes:
continue
pos = feat.GetPos(confId)
color = colors.get(family, (.5, .5, .5))
if transparency:
_globalSphereCGO.extend([ALPHA, 1 - transparency])
else:
_globalSphereCGO.extend([ALPHA, 1])
_globalSphereCGO.extend([COLOR, color[0], color[1], color[2],
SPHERE, pos.x, pos.y, pos.z, radius])
if writeFeats:
aidText = ' '.join([str(x + 1) for x in feat.GetAtomIds()])
print(f'{family}\t{pos.x:.3f}\t{pos.y:.3f}\t{pos.z:.3f}\t1.0\t# {aidText}')
if featMapFile:
print(f" family={family} pos=({pos.x:.3f}, {pos.y:.3f}, {pos.z:.3f}) weight=1.0",
end='', file=featMapFile)
if useFeatDirs:
ps = []
if family == 'Aromatic':
ps, _ = FeatDirUtils.GetAromaticFeatVects(mol.GetConformer(confId), feat.GetAtomIds(), pos, scale=1.0)
elif family == 'Donor':
aids = feat.GetAtomIds()
if len(aids) == 1:
FeatVectsDictMethod = {1: FeatDirUtils.GetDonor1FeatVects,
2: FeatDirUtils.GetDonor2FeatVects,
3: FeatDirUtils.GetDonor3FeatVects, }
featAtom = mol.GetAtomWithIdx(aids[0])
numHvyNbrs = len([1 for x in featAtom.GetNeighbors() if x.GetAtomicNum() > 1])
ps, _ = FeatVectsDictMethod[numHvyNbrs](mol.GetConformer(confId), aids, scale=1.0)
elif family == 'Acceptor':
aids = feat.GetAtomIds()
if len(aids) == 1:
FeatVectsDictMethod = {1: FeatDirUtils.GetDonor1FeatVects,
2: FeatDirUtils.GetDonor2FeatVects,
3: FeatDirUtils.GetDonor3FeatVects, }
featAtom = mol.GetAtomWithIdx(aids[0])
numHvyNbrs = len([x for x in featAtom.GetNeighbors() if x.GetAtomicNum() > 1])
ps, _ = FeatVectsDictMethod[numHvyNbrs](mol.GetConformer(confId), aids, scale=1.0)
for tail, head in ps:
ShowArrow(viewer, tail, head, radius, color, dirLabel, transparency=transparency,
includeArrowhead=includeArrowheads)
if featMapFile:
vect = head - tail
print(f'dir=({vect.x:.3f}, {vect.y:.3f}, {vect.z:.3f})', end='', file=featMapFile)
if featMapFile:
aidText = ' '.join([str(x + 1) for x in feat.GetAtomIds()])
print(f'# {aidText}', file=featMapFile)
# --- ---- --- ---- --- ---- --- ---- --- ---- --- ----
import sys, os
from rdkit import RDConfig
from optparse import OptionParser
parser = OptionParser(_usage, version='%prog ' + _version)
parser.add_option('-x', '--exclude', default='',
help='provide a list of feature names that should be excluded')
parser.add_option('-f', '--fdef', default=os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef'),
help='provide the name of the feature definition (fdef) file.')
parser.add_option('--noDirs', '--nodirs', dest='useDirs', default=True, action='store_false',
help='do not draw feature direction indicators')
parser.add_option('--noHeads', dest='includeArrowheads', default=True, action='store_false',
help='do not draw arrowheads on the feature direction indicators')
parser.add_option('--noClear', '--noclear', dest='clearAll', default=False, action='store_true',
help='do not clear PyMol on startup')
parser.add_option('--noMols', '--nomols', default=False, action='store_true',
help='do not draw the molecules')
parser.add_option('--writeFeats', '--write', default=False, action='store_true',
help='print the feature information to the console')
parser.add_option('--featMapFile', '--mapFile', default='',
help='save a feature map definition to the specified file')
parser.add_option('--verbose', default=False, action='store_true', help='be verbose')
if __name__ == '__main__':
from rdkit import Chem
from rdkit.Chem import AllChem
from rdkit.Chem.PyMol import MolViewer
options, args = parser.parse_args()
if len(args) < 1:
    parser.error('please provide at least one sd or mol file')
try:
v = MolViewer()
except Exception:
logger.error(
'Unable to connect to PyMol server.\nPlease run ~landrgr1/extern/PyMol/launch.sh to start it.')
sys.exit(1)
if options.clearAll:
v.DeleteAll()
try:
fdef = open(options.fdef, 'r').read()
except IOError:
logger.error('ERROR: Could not open fdef file %s' % options.fdef)
sys.exit(1)
factory = AllChem.BuildFeatureFactoryFromString(fdef)
if options.writeFeats:
print('# Family \tX \tY \tZ \tRadius\t # Atom_ids')
if options.featMapFile:
if options.featMapFile == '-':
options.featMapFile = sys.stdout
else:
      options.featMapFile = open(options.featMapFile, 'w+')
print('# Feature map generated by ShowFeats v%s' % _version, file=options.featMapFile)
print("ScoreMode=All", file=options.featMapFile)
print("DirScoreMode=Ignore", file=options.featMapFile)
print("BeginParams", file=options.featMapFile)
for family in factory.GetFeatureFamilies():
print(" family=%s width=1.0 radius=3.0" % family, file=options.featMapFile)
print("EndParams", file=options.featMapFile)
print("BeginPoints", file=options.featMapFile)
i = 1
for midx, molN in enumerate(args):
if molN != '-':
      featLabel = f'{molN}_Feats'
else:
featLabel = f'Mol{midx + 1}_Feats'
v.server.resetCGO(featLabel)
    # this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.sphere((0, 0, 0), .01, (1, 0, 1), featLabel)
dirLabel = featLabel + "-dirs"
v.server.resetCGO(dirLabel)
    # this is a bit of kludgery to work around what seems to be a pymol cgo bug:
v.server.cylinder((0, 0, 0), (.01, .01, .01), .01, (1, 0, 1), dirLabel)
if molN != '-':
try:
ms = Chem.SDMolSupplier(molN)
except Exception:
logger.error('Problems reading input file: %s' % molN)
ms = []
else:
ms = Chem.SDMolSupplier()
ms.SetData(sys.stdin.read())
for m in ms:
nm = f'Mol_{i}'
if m.HasProp('_Name'):
nm += '_' + m.GetProp('_Name')
if options.verbose:
if m.HasProp('_Name'):
print("#Molecule: ", m.GetProp('_Name'))
else:
print("#Molecule: ", nm)
ShowMolFeats(m, factory, v, transparency=0.25, excludeTypes=options.exclude, name=nm,
showOnly=False, useFeatDirs=options.useDirs, featLabel=featLabel,
dirLabel=dirLabel, includeArrowheads=options.includeArrowheads,
writeFeats=options.writeFeats, showMol=not options.noMols,
featMapFile=options.featMapFile)
i += 1
if not i % 100:
logger.info(f"Done {i} poses")
if ms:
v.server.renderCGO(_globalSphereCGO, featLabel, 1)
if options.useDirs:
v.server.renderCGO(_globalArrowCGO, dirLabel, 1)
if options.featMapFile:
print("EndPoints", file=options.featMapFile)
sys.exit(0)
|
bp-kelley/rdkit
|
rdkit/Chem/Features/ShowFeats.py
|
Python
|
bsd-3-clause
| 11,904
|
[
"PyMOL",
"RDKit"
] |
363ea4bd1dd97bad1d6cbcd41f95ddefb19f10c1e038f34f4fd8c6c39ba8321a
|
from werkzeug.wsgi import ClosingIterator
def all_casings(input_string):
"""
Permute all casings of a given string.
A pretty algorithm, via @Amber
http://stackoverflow.com/questions/6792803/finding-all-possible-case-permutations-in-python
"""
if not input_string:
yield ""
else:
first = input_string[:1]
if first.lower() == first.upper():
for sub_casing in all_casings(input_string[1:]):
yield first + sub_casing
else:
for sub_casing in all_casings(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
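# Illustrative only: for a short string the generator yields every upper/lower
# combination of the alphabetic characters, lowercase-first, e.g.
#   >>> list(all_casings("ab"))
#   ['ab', 'Ab', 'aB', 'AB']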
class ZappaWSGIMiddleware(object):
"""
Middleware functions necessary for a Zappa deployment.
    Most hacks have now been removed except for Set-Cookie permutation.
"""
def __init__(self, application):
self.application = application
def __call__(self, environ, start_response):
"""
We must case-mangle the Set-Cookie header name or AWS will use only a
single one of these headers.
"""
def encode_response(status, headers, exc_info=None):
"""
Create an APIGW-acceptable version of our cookies.
We have to use a bizarre hack that turns multiple Set-Cookie headers into
their case-permutated format, ex:
Set-cookie:
sEt-cookie:
seT-cookie:
To get around an API Gateway limitation.
This is weird, but better than our previous hack of creating a Base58-encoded
supercookie.
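
            Illustrative only (assumed header shapes): two incoming cookie
            headers such as
                [('Set-Cookie', 'a=1'), ('Set-Cookie', 'b=2')]
            leave this function with distinct case-permutated names, e.g.
                [('set-cookie', 'a=1'), ('Set-cookie', 'b=2')]
            which API Gateway no longer collapses into a single header.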
"""
# All the non-cookie headers should be sent unharmed.
# The main app can send 'set-cookie' headers in any casing
# Related: https://github.com/Miserlou/Zappa/issues/990
new_headers = [header for header in headers
if ((type(header[0]) != str) or (header[0].lower() != 'set-cookie'))]
cookie_headers = [header for header in headers
if ((type(header[0]) == str) and (header[0].lower() == "set-cookie"))]
for header, new_name in zip(cookie_headers,
all_casings("Set-Cookie")):
new_headers.append((new_name, header[1]))
return start_response(status, new_headers, exc_info)
# Call the application with our modifier
response = self.application(environ, encode_response)
# Return the response as a WSGI-safe iterator
return ClosingIterator(response)
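# Editor's note: a minimal, hypothetical usage sketch (demo_app and fake_start_response
# are illustrative names, not part of the original module). It shows the middleware
# re-casing duplicate Set-Cookie headers before they reach the server:
if __name__ == "__main__":
    def demo_app(environ, start_response):
        headers = [("Content-Type", "text/plain"),
                   ("Set-Cookie", "a=1"),
                   ("Set-Cookie", "b=2")]
        start_response("200 OK", headers)
        return [b"hello"]
    def fake_start_response(status, headers, exc_info=None):
        # The two cookie headers arrive with distinct casings, e.g. "set-cookie" and "Set-cookie".
        print(status, headers)
    wrapped = ZappaWSGIMiddleware(demo_app)
    list(wrapped({}, fake_start_response))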
|
anush0247/Zappa
|
zappa/middleware.py
|
Python
|
mit
| 2,639
|
[
"Amber"
] |
0a47ceb214983c43914d5d0805f4de06e54a01ea86ad4e010f0b3391c9b7ede9
|
import vigra
from vigra import graphs
from vigra import numpy
import pylab
# parameter
filepath = '12003.jpg' # input image path
sigmaGradMag = 5.0 # sigma Gaussian gradient
superpixelDiameter = 10 # super-pixel size
slicWeight = 10.0 # SLIC color - spatial weight
beta = 0.5 # node vs edge weight
nodeNumStop = 50 # desired num. nodes in result
# load image and convert to LAB
img = vigra.impex.readImage(filepath)
# get super-pixels with slic on LAB image
imgLab = vigra.colors.transform_RGB2Lab(img)
labels, nseg = vigra.analysis.slicSuperpixels(imgLab, slicWeight,
superpixelDiameter)
labels = vigra.analysis.labelImage(labels)
# compute gradient on interpolated image
imgLabBig = vigra.resize(imgLab, [imgLab.shape[0]*2-1, imgLab.shape[1]*2-1])
gradMag = vigra.filters.gaussianGradientMagnitude(imgLabBig, sigmaGradMag)
# get 2D grid graph and edgeMap for grid graph
# from gradMag of interpolated image
gridGraph = graphs.gridGraph(img.shape[0:2])
gridGraphEdgeIndicator = graphs.edgeFeaturesFromInterpolatedImage(gridGraph,
gradMag)
# get region adjacency graph from super-pixel labels
rag = graphs.regionAdjacencyGraph(gridGraph, labels)
# accumulate edge weights from gradient magnitude
edgeWeights = rag.accumulateEdgeFeatures(gridGraphEdgeIndicator)
# accumulate node features from grid graph node map
# which is just a plain image (with channels)
nodeFeatures = rag.accumulateNodeFeatures(imgLab)
# do agglomerativeClustering
labels = graphs.agglomerativeClustering(graph=rag, edgeWeights=edgeWeights,
beta=beta, nodeFeatures=nodeFeatures,
nodeNumStop=nodeNumStop,wardness=0.8)
# show result
f = pylab.figure()
ax1 = f.add_subplot(2, 2, 1)
vigra.imshow(gradMag,show=False)
ax1.set_title("Gradient Magnitude")
pylab.axis('off')
ax2 = f.add_subplot(2, 2, 2)
rag.show(img)
ax2.set_title("Over-Segmentation")
pylab.axis('off')
ax3 = f.add_subplot(2, 2, 3)
rag.show(img, labels)
ax3.set_title("Result-Segmentation")
pylab.axis('off')
ax4 = f.add_subplot(2, 2, 4)
rag.showNested(img, labels)
ax4.set_title("Result-Segmentation")
pylab.axis('off')
vigra.show()
|
dstoe/vigra
|
vigranumpy/examples/graph_agglomerative_clustering.py
|
Python
|
mit
| 2,298
|
[
"Gaussian"
] |
c4070d1ab1d222cea54d3c43afae8007fe970dbd58d6f85259a021f9984f39d7
|
# Copyright (C) 2014
# Pierre de Buyl
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
Like all boundary condition objects, this class implements all the methods of
the base class **BC**, which are described in detail in the documentation of
the abstract class **BC**.
The SlabBC class is responsible for a cuboid boundary condition that is periodic
in all but the "dir" dimension. Currently, dir is set arbitrarily to "0" (the
x-direction).
Example:
>>> boxsize = (Lx, Ly, Lz)
>>> bc = espressopp.bc.SlabBC(rng, boxsize)
.. py:method:: espressopp.bc.SlabBC(rng, boxL)
:param rng:
:param boxL: (default: 1.0)
:type rng:
:type boxL: real
.. py:method:: espressopp.bc.SlabBC.setBoxL(boxL)
:param boxL:
:type boxL:
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp import toReal3D
from espressopp.bc.BC import *
from _espressopp import bc_SlabBC
class SlabBCLocal(BCLocal, bc_SlabBC):
def __init__(self, rng, boxL=1.0):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup() or pmi.isController:
cxxinit(self, bc_SlabBC, rng, toReal3D(boxL))
# override length property
def setBoxL(self, boxL):
if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
self.cxxclass.boxL.fset(self, toReal3D(boxL))
boxL = property(bc_SlabBC.boxL.fget, setBoxL)
if pmi.isController :
class SlabBC(BC):
pmiproxydefs = dict(
cls = 'espressopp.bc.SlabBCLocal',
pmiproperty = [ 'boxL' ]
)
|
MrTheodor/espressopp
|
src/bc/SlabBC.py
|
Python
|
gpl-3.0
| 2,456
|
[
"ESPResSo"
] |
b6cc0e1c0f106f4ed6a7cb7b5c439759045fef25edcb62e5e187fe8a24c00c33
|
import logging
import numpy as np
import nibabel as nib
import scipy.ndimage as ndimage
from six import string_types
from .check import check_img
from nilearn._utils import check_niimg
from nilearn.image.image import new_img_like, _fast_smooth_array
log = logging.getLogger(__name__)
# def smooth_volume(nifti_file, smoothmm):
# """
#
# @param nifti_file: string
# @param smoothmm: int
# @return:
# """
# from nipy.algorithms.kernel_smooth import LinearFilter
# from nipy import load_image
# try:
# img = load_image(nifti_file)
# except Exception:
# log.exception('Error reading file {0}.'.format(nifti_file))
# raise
#
# if smoothmm <= 0:
# return img
#
# filter = LinearFilter(img.coordmap, img.shape)
# return filter.smooth(img)
#
def fwhm2sigma(fwhm):
"""Convert a FWHM value to sigma in a Gaussian kernel.
Parameters
----------
fwhm: float or numpy.array
fwhm value or values
Returns
-------
    sigma: float or numpy.array
        sigma values corresponding to `fwhm` values
"""
fwhm = np.asarray(fwhm)
return fwhm / np.sqrt(8 * np.log(2))
def sigma2fwhm(sigma):
"""Convert a sigma in a Gaussian kernel to a FWHM value.
Parameters
----------
sigma: float or numpy.array
sigma value or values
Returns
-------
fwhm: float or numpy.array
fwhm values corresponding to `sigma` values
"""
sigma = np.asarray(sigma)
return np.sqrt(8 * np.log(2)) * sigma
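# Editor's note: a small worked example (not part of the original module). The two
# helpers above are inverses of each other, since FWHM = sigma * sqrt(8 * ln 2) ~= 2.3548 * sigma:
#
#     >>> round(float(sigma2fwhm(1.0)), 4)
#     2.3548
#     >>> round(float(fwhm2sigma(2.3548)), 4)
#     1.0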
def smooth_volume(image, smoothmm):
"""See smooth_img."""
return smooth_imgs(image, smoothmm)
def _smooth_data_array(arr, affine, fwhm, copy=True):
"""Smooth images with a a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
Parameters
----------
arr: numpy.ndarray
3D or 4D array, with image number as last dimension.
affine: numpy.ndarray
Image affine transformation matrix for image.
fwhm: scalar, numpy.ndarray
Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.
If a scalar is given, kernel width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
copy: bool
if True, will make a copy of the input array. Otherwise will directly smooth the input array.
Returns
-------
smooth_arr: numpy.ndarray
"""
if arr.dtype.kind == 'i':
if arr.dtype == np.int64:
arr = arr.astype(np.float64)
else:
arr = arr.astype(np.float32)
if copy:
arr = arr.copy()
    # Zero out possible NaNs and Infs in the image.
arr[np.logical_not(np.isfinite(arr))] = 0
try:
# Keep the 3D part of the affine.
affine = affine[:3, :3]
# Convert from FWHM in mm to a sigma.
fwhm_sigma_ratio = np.sqrt(8 * np.log(2))
vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
sigma = fwhm / (fwhm_sigma_ratio * vox_size)
for n, s in enumerate(sigma):
ndimage.gaussian_filter1d(arr, s, output=arr, axis=n)
except:
raise ValueError('Error smoothing the array.')
else:
return arr
def smooth_imgs(images, fwhm):
"""Smooth images using a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of each image in images.
In all cases, non-finite values in input are zeroed.
Parameters
----------
imgs: str or img-like object or iterable of img-like objects
See boyle.nifti.read.read_img
Image(s) to smooth.
fwhm: scalar or numpy.ndarray
Smoothing kernel size, as Full-Width at Half Maximum (FWHM) in millimeters.
If a scalar is given, kernel width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
Returns
-------
smooth_imgs: nibabel.Nifti1Image or list of.
Smooth input image/s.
"""
if fwhm <= 0:
return images
if not isinstance(images, string_types) and hasattr(images, '__iter__'):
only_one = False
else:
only_one = True
images = [images]
result = []
for img in images:
img = check_img(img)
affine = img.get_affine()
smooth = _smooth_data_array(img.get_data(), affine, fwhm=fwhm, copy=True)
result.append(nib.Nifti1Image(smooth, affine))
if only_one:
return result[0]
else:
return result
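# Editor's note: a hypothetical one-liner (assumed file path, not part of the original
# module) showing a typical call to the helper above; a 6 mm FWHM kernel is a common
# choice for functional MRI data:
#
#     smoothed_img = smooth_imgs('/data/subject01_bold.nii.gz', fwhm=6)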
def _smooth_array(arr, affine, fwhm=None, ensure_finite=True, copy=True, **kwargs):
"""Smooth images by applying a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
This is copied and slightly modified from nilearn:
https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py
Added the **kwargs argument.
Parameters
==========
arr: numpy.ndarray
4D array, with image number as last dimension. 3D arrays are also
accepted.
affine: numpy.ndarray
(4, 4) matrix, giving affine transformation for image. (3, 3) matrices
are also accepted (only these coefficients are used).
If fwhm='fast', the affine is not used and can be None
fwhm: scalar, numpy.ndarray, 'fast' or None
Smoothing strength, as a full-width at half maximum, in millimeters.
If a scalar is given, width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
If fwhm == 'fast', a fast smoothing will be performed with
a filter [0.2, 1, 0.2] in each direction and a normalisation
to preserve the local average value.
If fwhm is None, no filtering is performed (useful when just removal
of non-finite values is needed).
ensure_finite: bool
        if True, replace all non-finite values (like NaNs) by zero before
filtering.
copy: bool
        if True (the default), the input array is copied before filtering; if False,
        the filtering is performed in-place on the input array.
kwargs: keyword-arguments
Arguments for the ndimage.gaussian_filter1d function.
Returns
=======
filtered_arr: numpy.ndarray
arr, filtered.
Notes
=====
This function is most efficient with arr in C order.
"""
if arr.dtype.kind == 'i':
if arr.dtype == np.int64:
arr = arr.astype(np.float64)
else:
# We don't need crazy precision
arr = arr.astype(np.float32)
if copy:
arr = arr.copy()
if ensure_finite:
# SPM tends to put NaNs in the data outside the brain
arr[np.logical_not(np.isfinite(arr))] = 0
if fwhm == 'fast':
arr = _fast_smooth_array(arr)
elif fwhm is not None:
# Keep only the scale part.
affine = affine[:3, :3]
# Convert from a FWHM to a sigma:
fwhm_over_sigma_ratio = np.sqrt(8 * np.log(2))
vox_size = np.sqrt(np.sum(affine ** 2, axis=0))
sigma = fwhm / (fwhm_over_sigma_ratio * vox_size)
for n, s in enumerate(sigma):
ndimage.gaussian_filter1d(arr, s, output=arr, axis=n, **kwargs)
return arr
def smooth_img(imgs, fwhm, **kwargs):
"""Smooth images by applying a Gaussian filter.
Apply a Gaussian filter along the three first dimensions of arr.
In all cases, non-finite values in input image are replaced by zeros.
This is copied and slightly modified from nilearn:
https://github.com/nilearn/nilearn/blob/master/nilearn/image/image.py
Added the **kwargs argument.
Parameters
==========
imgs: Niimg-like object or iterable of Niimg-like objects
See http://nilearn.github.io/manipulating_images/manipulating_images.html#niimg.
Image(s) to smooth.
fwhm: scalar, numpy.ndarray, 'fast' or None
Smoothing strength, as a Full-Width at Half Maximum, in millimeters.
If a scalar is given, width is identical on all three directions.
A numpy.ndarray must have 3 elements, giving the FWHM along each axis.
If fwhm == 'fast', a fast smoothing will be performed with
a filter [0.2, 1, 0.2] in each direction and a normalisation
to preserve the scale.
If fwhm is None, no filtering is performed (useful when just removal
of non-finite values is needed)
Returns
=======
filtered_img: nibabel.Nifti1Image or list of.
Input image, filtered. If imgs is an iterable, then filtered_img is a
list.
"""
# Use hasattr() instead of isinstance to workaround a Python 2.6/2.7 bug
# See http://bugs.python.org/issue7624
if hasattr(imgs, "__iter__") \
and not isinstance(imgs, string_types):
single_img = False
else:
single_img = True
imgs = [imgs]
ret = []
for img in imgs:
img = check_niimg(img)
affine = img.get_affine()
filtered = _smooth_array(img.get_data(), affine, fwhm=fwhm,
ensure_finite=True, copy=True, **kwargs)
ret.append(new_img_like(img, filtered, affine, copy_header=True))
if single_img:
return ret[0]
else:
return ret
|
Neurita/boyle
|
boyle/nifti/smooth.py
|
Python
|
bsd-3-clause
| 9,313
|
[
"Gaussian"
] |
ad0e8bb4ffc002b836744f814e299580fae1d1ba99af0a91f2d3aea12ea51bd5
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkExtractEdges(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkExtractEdges(), 'Processing.',
('vtkDataSet',), ('vtkPolyData',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkExtractEdges.py
|
Python
|
bsd-3-clause
| 484
|
[
"VTK"
] |
1a9a4783dd0ad97c0ff2fcf6a80672c14cdb4133a831a0d0c56624f63684110a
|
from galaxy.util import bunch
import logging
log = logging.getLogger( __name__ )
#class Bunch( dict ):
# """
# Bunch based on a dict
# """
# def __getattr__( self, key ):
# if key not in self:
# raise AttributeError(key)
# return self[key]
#
# def __setattr__( self, key, value ):
# self[key] = value
def form( *args, **kwargs ):
return FormBuilder( *args, **kwargs )
class FormBuilder( object ):
"""
Simple class describing an HTML form
"""
def __init__( self, action="", title="", name="form", submit_text="submit", use_panels=False ):
self.title = title
self.name = name
self.action = action
self.submit_text = submit_text
self.inputs = []
self.use_panels = use_panels
def add_input( self, type, name, label, value=None, error=None, help=None, use_label=True ):
self.inputs.append( FormInput( type, label, name, value, error, help, use_label ) )
return self
def add_text( self, name, label, value=None, error=None, help=None ):
return self.add_input( 'text', label, name, value, error, help )
def add_password( self, name, label, value=None, error=None, help=None ):
return self.add_input( 'password', label, name, value, error, help )
def add_select( self, name, label, value=None, options=[], error=None, help=None, use_label=True ):
self.inputs.append( SelectInput( name, label, value=value, options=options, error=error, help=help, use_label=use_label ) )
return self
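# Editor's note: a minimal, hypothetical usage sketch (field names are illustrative,
# not part of the original module). Every add_* method returns the builder, so a form
# can be assembled fluently:
#
#     login_form = ( form( action='/login', title='Log in', submit_text='Go' )
#                    .add_text( 'email', 'E-mail address' )
#                    .add_password( 'password', 'Password' ) )
#     # login_form.inputs now holds two FormInput instances.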
class FormInput( object ):
"""
Simple class describing a form input element
"""
def __init__( self, type, name, label, value=None, error=None, help=None, use_label=True, extra_attributes={}, **kwargs ):
self.type = type
self.name = name
self.label = label
self.value = value
self.error = error
self.help = help
self.use_label = use_label
self.extra_attributes = extra_attributes
class DatalistInput( FormInput ):
""" Data list input """
def __init__( self, name, *args, **kwargs ):
if 'extra_attributes' not in kwargs:
kwargs[ 'extra_attributes' ] = {}
kwargs[ 'extra_attributes' ][ 'list' ] = name
FormInput.__init__( self, None, name, *args, **kwargs )
self.options = kwargs.get( 'options', {} )
def body_html( self ):
options = "".join( [ "<option value='%s'>%s</option>" % ( key, value ) for key, value in self.options.iteritems() ] )
return """<datalist id="%s">%s</datalist>""" % ( self.name, options )
class SelectInput( FormInput ):
""" A select form input. """
def __init__( self, name, label, value=None, options=[], error=None, help=None, use_label=True ):
FormInput.__init__( self, "select", name, label, value=value, error=error, help=help, use_label=use_label )
self.options = options
class FormData( object ):
"""
Class for passing data about a form to a template, very rudimentary, could
be combined with the tool form handling to build something more general.
"""
def __init__( self ):
#TODO: galaxy's two Bunchs are defined differently. Is this right?
self.values = bunch.Bunch()
self.errors = bunch.Bunch()
|
mikel-egana-aranguren/SADI-Galaxy-Docker
|
galaxy-dist/lib/galaxy/web/framework/formbuilder.py
|
Python
|
gpl-3.0
| 3,315
|
[
"Galaxy"
] |
b4b850784de190818b3fc5c11d82f69ca1cab76445108e29109fa1fb8ea818f1
|
"""Constants used by AnsibleLint."""
import os.path
import sys
# mypy/pylint idiom for py36-py38 compatibility
# https://github.com/python/typeshed/issues/3500#issuecomment-560958608
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module
else:
from typing_extensions import Literal
DEFAULT_RULESDIR = os.path.join(os.path.dirname(__file__), 'rules')
CUSTOM_RULESDIR_ENVVAR = "ANSIBLE_LINT_CUSTOM_RULESDIR"
INVALID_CONFIG_RC = 2
ANSIBLE_FAILURE_RC = 3
ANSIBLE_MISSING_RC = 4
INVALID_PREREQUISITES_RC = 10
EXIT_CONTROL_C_RC = 130
# Minimal version of Ansible we support for runtime
ANSIBLE_MIN_VERSION = "2.9"
# Based on https://docs.ansible.com/ansible/latest/reference_appendices/config.html
ANSIBLE_DEFAULT_ROLES_PATH = (
"~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles"
)
ANSIBLE_MOCKED_MODULE = """\
# This is a mocked Ansible module generated by ansible-lint
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
module: {name}
short_description: Mocked
version_added: "1.0.0"
description: Mocked
author:
- ansible-lint (@nobody)
'''
EXAMPLES = '''mocked'''
RETURN = '''mocked'''
def main():
result = dict(
changed=False,
original_message='',
message='')
module = AnsibleModule(
argument_spec=dict(),
supports_check_mode=True,
)
module.exit_json(**result)
if __name__ == "__main__":
main()
"""
FileType = Literal[
"playbook",
"meta", # role meta
"tasks", # includes pre_tasks, post_tasks
"handlers", # very similar to tasks but with some specificts
# https://docs.ansible.com/ansible/latest/galaxy/user_guide.html#installing-roles-and-collections-from-the-same-requirements-yml-file
"requirements",
"role", # that is a folder!
"yaml", # generic yaml file, previously reported as unknown file type
"", # unknown file type
]
# odict is the base class used to represent data model of Ansible
# playbooks and tasks.
odict = dict
if sys.version_info[:2] < (3, 7):
try:
# pylint: disable=unused-import
from collections import OrderedDict as odict # noqa: 401
except ImportError:
pass
# Deprecated tags/ids and their newer names
RENAMED_TAGS = {
'102': 'no-jinja-when',
'104': 'deprecated-bare-vars',
'105': 'deprecated-module',
'106': 'role-name',
'202': 'risky-octal',
'203': 'no-tabs',
'205': 'playbook-extension',
'206': 'var-spacing',
'207': 'no-jinja-nesting',
'208': 'risky-file-permissions',
'301': 'no-changed-when',
'302': 'deprecated-command-syntax',
'303': 'command-instead-of-module',
'304': 'inline-env-var',
'305': 'command-instead-of-shell',
'306': 'risky-shell-pipe',
'401': 'git-latest',
'402': 'hg-latest',
'403': 'package-latest',
'404': 'no-relative-paths',
'501': 'partial-become',
'502': 'unnamed-task',
'503': 'no-handler',
'504': 'deprecated-local-action',
'505': 'missing-import',
'601': 'literal-compare',
'602': 'empty-string-compare',
'701': 'meta-no-info',
'702': 'meta-no-tags',
'703': 'meta-incorrect',
'704': 'meta-video-links',
'911': 'syntax-check',
}
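# Editor's note: a hypothetical illustration (not part of the original module) of
# translating a legacy numeric rule id into its current name via the table above:
#
#     >>> RENAMED_TAGS.get('503', '503')
#     'no-handler'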
|
ansible/ansible-lint
|
src/ansiblelint/constants.py
|
Python
|
mit
| 3,254
|
[
"Galaxy"
] |
37551a5c69c9697831fb8f14c9c4fc8345962eaac692a2b4273d50529272af98
|
# $Id$
#
# Copyright (C) 2006 Greg Landrum
#
# @@ All Rights Reserved @@
# This file is part of the RDKit.
# The contents are covered by the terms of the BSD license
# which is included in the file license.txt, found at the root
# of the RDKit source tree.
#
from rdkit.Chem import ChemicalFeatures
class FeatMapPoint(ChemicalFeatures.FreeChemicalFeature):
weight = 0.0
featDirs = None
def __init__(self, *args, **kwargs):
ChemicalFeatures.FreeChemicalFeature.__init__(self, *args, **kwargs)
self.featDirs = []
def initFromFeat(self, feat):
"""
>>> from rdkit import Geometry
>>> sfeat = ChemicalFeatures.FreeChemicalFeature('Aromatic','Foo',Geometry.Point3D(0,0,0))
>>> fmp = FeatMapPoint()
>>> fmp.initFromFeat(sfeat)
>>> fmp.GetFamily()==sfeat.GetFamily()
True
>>> fmp.GetType()==sfeat.GetType()
True
>>> list(fmp.GetPos())
[0.0, 0.0, 0.0]
>>> fmp.featDirs == []
True
>>> sfeat.featDirs = [Geometry.Point3D(1.0,0,0)]
>>> fmp.initFromFeat(sfeat)
>>> len(fmp.featDirs)
1
"""
self.SetFamily(feat.GetFamily())
self.SetType(feat.GetType())
self.SetPos(feat.GetPos())
if hasattr(feat, 'featDirs'):
self.featDirs = feat.featDirs[:]
def GetDist2(self, other):
"""
>>> from rdkit import Geometry
>>> sfeat = ChemicalFeatures.FreeChemicalFeature('Aromatic','Foo',Geometry.Point3D(0,0,0))
>>> fmp = FeatMapPoint()
>>> fmp.initFromFeat(sfeat)
>>> fmp.GetDist2(sfeat)
0.0
>>> sfeat.SetPos(Geometry.Point3D(2,0,0))
>>> fmp.GetDist2(sfeat)
4.0
"""
return (self.GetPos() - other.GetPos()).LengthSq()
def GetDirMatch(self, other, useBest=True):
"""
>>> from rdkit import Geometry
>>> sfeat = ChemicalFeatures.FreeChemicalFeature('Aromatic','Foo',Geometry.Point3D(0,0,0))
>>> fmp = FeatMapPoint()
>>> fmp.initFromFeat(sfeat)
>>> fmp.GetDirMatch(sfeat)
1.0
>>> sfeat.featDirs=[Geometry.Point3D(0,0,1),Geometry.Point3D(0,0,-1)]
>>> fmp.featDirs=[Geometry.Point3D(0,0,1),Geometry.Point3D(1,0,0)]
>>> fmp.GetDirMatch(sfeat)
1.0
>>> fmp.GetDirMatch(sfeat,useBest=True)
1.0
>>> fmp.GetDirMatch(sfeat,useBest=False)
0.0
>>> sfeat.featDirs=[Geometry.Point3D(0,0,1)]
>>> fmp.GetDirMatch(sfeat,useBest=False)
0.5
>>> sfeat.featDirs=[Geometry.Point3D(0,0,1)]
>>> fmp.featDirs=[Geometry.Point3D(0,0,-1)]
>>> fmp.GetDirMatch(sfeat)
-1.0
>>> fmp.GetDirMatch(sfeat,useBest=False)
-1.0
"""
if not self.featDirs or not other.featDirs:
return 1.0
if not useBest:
accum = 0.0
else:
accum = -100000.0
for sDir in self.featDirs:
for oDir in other.featDirs:
d = sDir.DotProduct(oDir)
if useBest:
if d > accum:
accum = d
else:
accum += d
if not useBest:
accum /= len(self.featDirs) * len(other.featDirs)
return accum
# ------------------------------------
#
# doctest boilerplate
#
def _runDoctests(verbose=None): # pragma: nocover
import sys
import doctest
failed, _ = doctest.testmod(optionflags=doctest.ELLIPSIS, verbose=verbose)
sys.exit(failed)
if __name__ == '__main__': # pragma: nocover
_runDoctests()
|
rvianello/rdkit
|
rdkit/Chem/FeatMaps/FeatMapPoint.py
|
Python
|
bsd-3-clause
| 3,283
|
[
"RDKit"
] |
6f429f09c80f37e862bb7b0cbc7d6eb65e1f68897a50e2a1d27bea4648ffe121
|
import requests
import h2o
import h2o_test_utils
from h2o_test_utils import ModelSpec
from h2o_test_utils import GridSpec
def build_and_test(a_node, pp, datasets, algos, algo_additional_default_params):
####################################################################################################
# Build and do basic validation checks on models
####################################################################################################
models_to_build = [
ModelSpec.for_dataset('kmeans_prostate', 'kmeans', datasets['prostate_clustering'], { 'k': 2 } ),
ModelSpec.for_dataset('glm_prostate_regression', 'glm', datasets['prostate_regression'], {'family': 'gaussian'} ),
ModelSpec.for_dataset('glm_prostate_binomial', 'glm', datasets['prostate_binomial'], {'family': 'binomial'} ),
ModelSpec.for_dataset('glm_airlines_binomial', 'glm', datasets['airlines_binomial'], {'response_column': 'IsDepDelayed', 'family': 'binomial' } ),
ModelSpec.for_dataset('glm_iris_multinomial', 'glm', datasets['iris_multinomial'], {'response_column': 'class', 'family': 'multinomial' } ),
ModelSpec.for_dataset('deeplearning_prostate_regression', 'deeplearning', datasets['prostate_regression'], { 'epochs': 1, 'loss': 'Quadratic' } ),
ModelSpec.for_dataset('deeplearning_prostate_binomial', 'deeplearning', datasets['prostate_binomial'], { 'epochs': 1, 'hidden': [20, 20], 'loss': 'CrossEntropy' } ),
ModelSpec.for_dataset('deeplearning_airlines_binomial', 'deeplearning', datasets['airlines_binomial'], { 'epochs': 1, 'hidden': [10, 10], 'loss': 'CrossEntropy' } ),
ModelSpec.for_dataset('deeplearning_iris_multinomial', 'deeplearning', datasets['iris_multinomial'], { 'epochs': 1, 'loss': 'CrossEntropy' } ),
ModelSpec.for_dataset('gbm_prostate_regression', 'gbm', datasets['prostate_regression'], { 'ntrees': 5, 'distribution': 'gaussian' } ),
ModelSpec.for_dataset('gbm_prostate_binomial', 'gbm', datasets['prostate_binomial'], { 'ntrees': 5, 'distribution': 'multinomial' } ),
ModelSpec.for_dataset('gbm_airlines_binomial', 'gbm', datasets['airlines_binomial'], { 'ntrees': 5, 'distribution': 'multinomial' } ),
ModelSpec.for_dataset('gbm_iris_multinomial', 'gbm', datasets['iris_multinomial'], { 'ntrees': 5, 'distribution': 'multinomial' } ),
]
built_models = {}
for model_spec in models_to_build:
model = model_spec.build_and_validate_model(a_node)
built_models[model_spec['dest_key']] = model
grids_to_build = [
GridSpec.for_dataset('kmeans_prostate_grid', 'kmeans', datasets['prostate_clustering'], { }, { 'k': [2, 3, 4] } ),
GridSpec.for_dataset('glm_prostate_regression_grid', 'glm', datasets['prostate_regression'], {'family': 'gaussian'}, { 'lambda': [0.0001, 0.001, 0.01, 0.1] } ),
GridSpec.for_dataset('glm_prostate_binomial_grid', 'glm', datasets['prostate_binomial'], {'family': 'binomial'}, { 'lambda': [0.0001, 0.001, 0.01, 0.1] } ),
GridSpec.for_dataset('glm_airlines_binomial_grid', 'glm', datasets['airlines_binomial'], {'response_column': 'IsDepDelayed', 'family': 'binomial'}, { 'lambda': [0.0001, 0.001, 0.01, 0.025] } ),
GridSpec.for_dataset('glm_iris_multinomial_grid', 'glm', datasets['iris_multinomial'], {'response_column': 'class', 'family': 'multinomial'}, { 'lambda': [0.0001, 0.001, 0.01, 0.025] } ),
GridSpec.for_dataset('deeplearning_prostate_regression_grid', 'deeplearning', datasets['prostate_regression'], { 'loss': 'Quadratic' }, { 'epochs': [0.1, 0.5, 1] } ),
GridSpec.for_dataset('deeplearning_prostate_binomial_grid', 'deeplearning', datasets['prostate_binomial'], { 'hidden': [20, 20], 'loss': 'CrossEntropy' }, { 'epochs': [0.1, 0.5, 1] } ),
GridSpec.for_dataset('deeplearning_airlines_binomial_grid', 'deeplearning', datasets['airlines_binomial'], { 'hidden': [10, 10], 'loss': 'CrossEntropy' }, { 'epochs': [0.1, 0.5, 1] } ),
GridSpec.for_dataset('deeplearning_iris_multinomial_grid', 'deeplearning', datasets['iris_multinomial'], { 'loss': 'CrossEntropy' }, { 'epochs': [0.1, 0.5, 1] } ),
GridSpec.for_dataset('gbm_prostate_regression_grid', 'gbm', datasets['prostate_regression'], { 'max_depth': 3 }, { 'ntrees': [1, 5, 10], 'distribution': ["gaussian", "poisson", "gamma", "tweedie"] } ),
GridSpec.for_dataset('gbm_prostate_binomial_grid', 'gbm', datasets['prostate_binomial'], { }, { 'ntrees': [5, 7], 'max_depth': [1, 3, 5] } ),
GridSpec.for_dataset('gbm_airlines_binomial_grid', 'gbm', datasets['airlines_binomial'], { 'distribution': 'multinomial' }, { 'ntrees': [1, 5, 10], 'max_depth': [1, 3, 5] } ),
GridSpec.for_dataset('gbm_iris_multinomial_grid', 'gbm', datasets['iris_multinomial'], { 'distribution': 'multinomial' }, { 'ntrees': [1, 5, 10], 'max_depth': [1, 3, 5] } ),
# TODO: this should trigger a parameter validation error, but instead the non-grid ntrees silently overrides the drid values: GridSpec.for_dataset('gbm_iris_multinomial_grid', 'gbm', datasets['iris_multinomial'], { 'ntrees': 5, 'distribution': 'multinomial' }, { 'ntrees': [1, 5, 10], 'max_depth': [1, 3, 5] } ),
]
for grid_spec in grids_to_build:
grid = grid_spec.build_and_validate_grid(a_node)
for model_key in grid['model_ids']:
model_key = model_key['name']
built_models[model_key] = a_node.models(key=model_key)
# grid = a_node.grid(key='kmeans_prostate_grid', sort_by='', sort_order='desc')
h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='totss', sort_order='desc')
h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='tot_withinss', sort_order='desc')
h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='betweenss', sort_order='desc')
h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='totss', sort_order='asc')
h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='tot_withinss', sort_order='asc')
h2o_test_utils.fetch_and_validate_grid_sort(a_node, key='kmeans_prostate_grid', sort_by='betweenss', sort_order='asc')
#######################################
# Test default parameters validation for each model builder
#
if h2o_test_utils.isVerbose(): print 'Testing ModelBuilder default parameters. . .'
model_builders = a_node.model_builders(timeoutSecs=240)['model_builders']
# Do we know about all of them?
server_algos = model_builders.keys()
assert len(set(server_algos) - set(algos)) == 0, "FAIL: Our set of algos doesn't match what the server knows about. Ours: " + repr(algos) + "; server's: " + repr(server_algos)
for algo, model_builder in model_builders.iteritems():
parameters_list = model_builder['parameters']
test_parameters = { value['name'] : value['default_value'] for value in parameters_list } # collect default parameters
if algo in algo_additional_default_params:
test_parameters.update(algo_additional_default_params[algo])
parameters_validation = a_node.validate_model_parameters(algo=algo, training_frame=None, parameters=test_parameters, timeoutSecs=240) # synchronous
assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in good-parameters parameters validation result."
h2o.H2O.verboseprint("Bad params validation messages: ", repr(parameters_validation))
expected_count = 0
if expected_count != parameters_validation['error_count']:
print "validation errors: "
pp.pprint(parameters_validation)
assert expected_count == parameters_validation['error_count'], "FAIL: " + str(expected_count) + " != error_count in good-parameters parameters validation result."
#######################################
# Test DeepLearning parameters validation
#
# Default parameters:
model_builder = a_node.model_builders(algo='deeplearning', timeoutSecs=240)['model_builders']['deeplearning']
dl_test_parameters_list = model_builder['parameters']
dl_test_parameters = {value['name'] : value['default_value'] for value in dl_test_parameters_list}
parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame=None, parameters=dl_test_parameters, timeoutSecs=240) # synchronous
assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in good-parameters parameters validation result."
h2o.H2O.verboseprint("Bad params validation messages: ", repr(parameters_validation))
if 0 != parameters_validation['error_count']:
print "validation errors: "
pp.pprint(parameters_validation)
assert 0 == parameters_validation['error_count'], "FAIL: 0 != error_count in good-parameters parameters validation result."
# Good parameters (note: testing with null training_frame):
dl_test_parameters = {'response_column': 'CAPSULE', 'hidden': "[10, 20, 10]" }
parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame=None, parameters=dl_test_parameters, timeoutSecs=240) # synchronous
assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in good-parameters parameters validation result."
h2o.H2O.verboseprint("Bad params validation messages: ", repr(parameters_validation))
if 0 != parameters_validation['error_count']:
print "validation errors: "
pp.pprint(parameters_validation)
assert 0 == parameters_validation['error_count'], "FAIL: 0 != error_count in good-parameters parameters validation result."
    # Bad parameters (input_dropout_ratio is out of range):
# (note: testing with null training_frame)
dl_test_parameters = {'response_column': 'CAPSULE', 'hidden': "[10, 20, 10]", 'input_dropout_ratio': 27 }
parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame=None, parameters=dl_test_parameters, timeoutSecs=240) # synchronous
assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in bad-parameters parameters validation result (input_dropout_ratio)."
h2o.H2O.verboseprint("Good params validation messages: ", repr(parameters_validation))
assert 0 != parameters_validation['error_count'], "FAIL: 0 == error_count in bad-parameters parameters validation result: " + repr(parameters_validation)
found_expected_error = False
for validation_message in parameters_validation['messages']:
if validation_message['message_type'] == 'ERRR' and validation_message['field_name'] == 'input_dropout_ratio':
found_expected_error = True
assert found_expected_error, "FAIL: Failed to find error message about input_dropout_ratio in the validation messages."
# Bad parameters (no response_column):
dl_test_parameters = {'hidden': "[10, 20, 10]" }
parameters_validation = a_node.validate_model_parameters(algo='deeplearning', training_frame='prostate_binomial', parameters=dl_test_parameters, timeoutSecs=240) # synchronous
assert 'error_count' in parameters_validation, "FAIL: Failed to find error_count in bad-parameters parameters validation result (response_column)."
h2o.H2O.verboseprint("Good params validation messages: ", repr(parameters_validation))
assert 0 != parameters_validation['error_count'], "FAIL: 0 == error_count in bad-parameters parameters validation result: " + repr(parameters_validation)
#######################################
# Try to build DeepLearning model for Prostate but with bad parameters; we should get a ModelParametersSchema with the error.
if h2o_test_utils.isVerbose(): print 'About to try to build a DeepLearning model with bad parameters. . .'
dl_prostate_bad_parameters = {'response_column': 'CAPSULE', 'hidden': "[10, 20, 10]", 'input_dropout_ratio': 27 }
parameters_validation = a_node.build_model(algo='deeplearning', model_id='deeplearning_prostate_binomial_bad', training_frame='prostate_binomial', parameters=dl_prostate_bad_parameters, timeoutSecs=240) # synchronous
h2o_test_utils.validate_validation_messages(parameters_validation, ['input_dropout_ratio'])
assert parameters_validation['__http_response']['status_code'] == requests.codes.precondition_failed, "FAIL: expected 412 Precondition Failed from a bad build request, got: " + str(parameters_validation['__http_response']['status_code'])
if h2o_test_utils.isVerbose(): print 'Done trying to build DeepLearning model with bad parameters.'
#####################################
# Early test of predict()
# TODO: remove after we remove the early exit
p = a_node.predict(model='deeplearning_airlines_binomial', frame='airlines_binomial', predictions_frame='deeplearning_airlines_binomial_predictions')
h2o_test_utils.validate_predictions(a_node, p, 'deeplearning_airlines_binomial', 'airlines_binomial', 43978, predictions_frame='deeplearning_airlines_binomial_predictions')
h2o_test_utils.validate_frame_exists(a_node, 'deeplearning_airlines_binomial_predictions')
h2o.H2O.verboseprint("Predictions for scoring: ", 'deeplearning_airlines_binomial', " on: ", 'airlines_binomial', ": ", repr(p))
# print h2o_test_utils.dump_json(p)
|
nilbody/h2o-3
|
py/rest_tests/test_models.py
|
Python
|
apache-2.0
| 13,539
|
[
"Gaussian"
] |
fda95b98b26b183c825c2ae107836a54e792ac7d7af8b1d9394ad9e44fe497a2
|
"""
Spatial transformer layer, compatible with keras APIs
"""
import keras.backend as K
import numpy as np
from keras.layers import Layer
from keras.models import Model
def standardize_coords(coords, maxes, dim):
"""
standardize_coords - standardizes the coordinates in a mesh grid between -1 and 1 for each
dimension.
    :param coords - mesh grid of coordinates, shape (dim, width, height, ...)
    :param maxes - maximum index along each spatial dimension, shape (dim,)
    :param dim - spatial dimensionality of the data, e.g. 2 for dealing with image data.
        Should match K.int_shape(coords)[0].
    :returns - the standardized coords, shape (dim, width, height, ...)
"""
maxes = K.cast(K.reshape(maxes, shape=[-1] + [1] * dim), "float32")
res = 2.0 * coords / maxes - 1.0
return res
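# Editor's note: a short worked example (not part of the original module). For a 2D
# grid whose largest index along each axis is maxes = (4, 4), the mapping
# 2 * coords / maxes - 1 sends index 0 to -1.0, index 2 to 0.0 and index 4 to +1.0,
# centring and rescaling the grid to the [-1, 1] range expected by the samplers below.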
def affine_transform(coords_grid, params, dim):
"""
affine_transform represents an affine transformation -
translation, rotation, scale and skew. Interprets params as the flat parameters of the
transformation matrix, for each sample.
:param coords_grid - grid of target coordinates
shape: (dim, width, height, ...)
:param params - parametrization of the affine transform
shape: (N, dim^2 + dim), dim^2 params for the rotation matrix + dim params
for the translation component
:returns - transformed coords_grid, to be used for sampling from the input image
shape: (N, dim, width, height, ...)
"""
# standardize, extend to homogenous coordinates
maxes = K.shape(coords_grid)[1:] - 1
coords_grid = standardize_coords(coords_grid, maxes, dim)
ones_pad = K.expand_dims(K.ones_like(coords_grid[0]), axis=0)
coords_grid = K.concatenate([coords_grid, ones_pad], axis=0)
# interpret the params as an affine transform matrix
transform_mat = K.reshape(x=params, shape=(-1, dim, dim + 1))
# apply the transformation (keras tensor product uses axis -2 for the second tensor)
permutation = tuple(range(1, dim)) + (0, dim)
coords_grid = K.permute_dimensions(x=coords_grid, pattern=permutation)
transformed = K.dot(transform_mat, coords_grid)
return transformed
def attention_transform(coords_grid, params, dim):
"""
attention_transform represents an attention transformation -
translation and isotropic scaling. Interprets params as the flat parameters of the
transformation matrix, for each sample.
:param coords_grid - grid of target coordinates
shape: (dim, width, height, ...)
:param params - parametrization of the attention transform
shape (N, dim + 1), 1 param for the isotropic scaling, dim params for the translation
component
:returns - transformed coords_grid, to be used for sampling from the input image
"""
# standardize, extend to homogenous coordinates
maxes = K.shape(coords_grid)[1:] - 1
coords_grid = standardize_coords(coords_grid, maxes, dim)
ones_pad = K.expand_dims(K.ones_like(coords_grid[0]), axis=0)
coords_grid = K.concatenate([coords_grid, ones_pad], axis=0)
# form the attention matrix, one part is a lambda * I, the other is the translation
n = K.shape(params)[0]
# scaling part: repeat lambda * I (for each param row)
# shape: (N, dim, dim)
scale_part = params[:, 0:1] * K.tile(x=K.reshape(K.eye(dim), shape=(1, -1)), n=[n, 1])
scale_part = K.reshape(scale_part, shape=[n, dim, dim])
# translation part
t_part = K.reshape(params[:, 1:], shape=(-1, dim, 1))
# attention matrix
transform_mat = K.concatenate([scale_part, t_part], axis=-1)
# apply the transformation (keras tensor product uses axis -2 for the second tensor)
coords_grid = K.permute_dimensions(x=coords_grid, pattern=(1, 0, 2))
transformed = K.dot(transform_mat, coords_grid)
return transformed
def tps_transform(coords_grid, params):
"""
tps_transform represents a thin plate spline transformation
:param coords_grid - grid of target coordinates
:param params - parametrization of the TPS transformation
:returns - transformed coords_grid, to be used for sampling from the input image
"""
raise NotImplementedError
def bitfield(n):
"""
bitfield - create list of binary 0 or 1 for the binary representation of n
:param n: an integer number
"""
# http://stackoverflow.com/questions/10321978/integer-to-bitfield-as-a-list
# a bit faster than int() conversion
return [1 if digit == '1' else 0 for digit in bin(n)[2:]]
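# Editor's note: a quick illustration (not part of the original module):
#
#     >>> bitfield(5)
#     [1, 0, 1]
#     >>> bitfield(2)
#     [1, 0]
#
# Callers left-pad the result with zeros up to `dim` entries, so the loop in
# interpolate_bilinear enumerates each of the 2**dim corner offsets exactly once.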
def upscale(coords, maxes, dim):
"""
upscale - unstandardizes the given set of coordinates from [-1, 1] to [0, maxes]. If there
are coordinates out of bounds, they are still upscaled and not clipped or wrapped in this
function.
:param coords: the indices to sample with
shape: (N, dim, width, height, ...)
:param maxes: array of maximum values for each spatial dimension
shape: (dim,)
:param dim: dimensionality of the data, e.g. 2 for 2D images
"""
maxes = K.reshape(maxes, [-1] + [1] * dim)
coords = (coords + 1.0) * maxes / 2.0
return coords
def clip(coords, maxes, dim):
"""
clip - clips the given set of coordinates so that they are within maxes range
:param coords: the indices to sample with
shape: (N, dim, width, height, ...)
:param maxes: array of maximum values for each spatial dimension
shape: (dim,)
:param dim: dimensionality of the data, e.g. 2 for 2D images
"""
if K.backend() == "tensorflow":
import tensorflow as tf
coords = K.stack([tf.clip_by_value(coords[:, i], 0, maxes[i])
for i in range(dim)], axis=1)
else:
import theano.tensor as T
coords = K.stack([T.clip(coords[:, i], 0, maxes[i])
for i in range(dim)], axis=1)
coords = K.cast(coords, dtype="int32")
return coords
def wrap(coords, maxes, dim):
"""
wrap - wraps the given set of coordinates so that they are within maxes range.
:param coords: the indices to sample with
shape: (N, dim, width, height, ...)
:param maxes: array of maximum values for each spatial dimension
shape: (dim,)
:param dim: dimensionality of the data, e.g. 2 for 2D images
"""
maxes = K.cast(K.reshape(maxes, [-1] + [1] * dim), dtype="int32")
coords = K.cast(coords, dtype="int32")
coords %= maxes
return coords
def sample_tf(inputs, coords, dim):
"""
sample_tf - more efficient sampling for tensorflow
:param inputs: the tensor to sample from
:param coords: the indices to sample with
:param dim: the dimensionality of the data
    (out-of-bound indices are expected to have already been clipped or wrapped by the caller)
"""
import tensorflow as tf
# form coords in a way so that we can gather_nd with them
# For this, I need to add an additional indexing dimension, which will just be
coords = tf.transpose(coords, [0] + [i for i in range(2, 2 + dim)] + [1])
# coords.shape == [N, width, height, ..., dim]
N = tf.shape(coords)[0]
inner_shape = tf.shape(coords)[1:-1]
batch_indices = tf.range(N)
batch_indices = tf.reshape(batch_indices, [-1] + [1] * dim + [1])
batch_indices = tf.tile(batch_indices, [1] + [inner_shape[i] for i in range(dim)] + [1])
coords = tf.concat([batch_indices, coords], axis=-1)
# coords.shape == [N, width, height, ..., 1 + dim]
# inputs.shape == [N, width, height, ..., n_chan]
output = tf.gather_nd(inputs, coords)
return output
def sample(inputs, coords, dim, wrapped):
"""
sample - samples from the inputs tensor using coords as indices.
:param inputs: the tensor to sample from
shape: (N, width, height, ..., n_chan)
:param coords: the indices to sample with
shape: (N, dim, width, height, ...)
:param dim: dimensionality of the data, e.g. 2 for 2D images
:param wrapped: whether to wrap out of bound indices or to clip them
"""
inputs_shape = K.shape(inputs)
coords_shape = K.shape(coords)
outputs_shape = K.concatenate([inputs_shape[0:1], coords_shape[2:], inputs_shape[-1:]])
maxes = K.cast(inputs_shape[1:-1] - 1, "int32")
if wrapped:
coords = wrap(coords, maxes, dim)
else:
coords = clip(coords, maxes, dim)
if K.backend() == "tensorflow":
return sample_tf(inputs, coords, dim)
n = inputs_shape[0]
n_chan = inputs_shape[-1]
flat_inputs = K.reshape(inputs, (-1, n_chan))
flat_coords = K.flatten(coords[:, -1])
for i in reversed(range(dim - 1)):
flat_coords += K.prod(inputs_shape[1:i + 2]) * K.flatten(coords[:, i])
coords_per_sample = K.prod(coords_shape[2:])
# add the offsets for each sample in the minibatch
if K.backend() == "tensorflow":
import tensorflow as tf
offsets = tf.range(n) * K.prod(inputs_shape[1:-1])
else:
import theano.tensor as T
offsets = T.arange(n) * K.prod(inputs_shape[1:-1])
offsets = K.reshape(offsets, (-1, 1))
offsets = K.tile(offsets, (1, coords_per_sample))
offsets = K.flatten(offsets)
flat_coords += offsets
outputs = K.gather(flat_inputs, flat_coords)
outputs = K.reshape(outputs, outputs_shape)
return outputs
def interpolate_bilinear(coords, inputs, dim, wrap=False):
"""
interpolate_bilinear - the default interpolation kernel to be used with the spatial
    transformer. Differentiable w.r.t. both the indices and the input tensors to be sampled.
:param coords
shape: (N, dim, width, height, ...)
:param inputs
shape: (N, width, height, .. n_chan)
:param dim - dimensionality of the data, e.g. 2 if inputs is a batch of images
:param wrap - whether to wrap, or otherwise clip during the interpolation
:returns - the sampled result
:shape (N, width, height, ..., n_chan), where width, height, ... come from the
coords shape
"""
inputs_shape = K.shape(inputs)
maxes = K.cast(inputs_shape[1:-1] - 1, "float32")
coords_float = upscale(coords, maxes, dim)
# floored coordinates, time to build the surrounding points based on them
if K.backend() == "tensorflow":
import tensorflow as tf
coords = tf.floor(coords_float)
else:
import theano.tensor as T
coords = T.floor(coords_float)
# construct the surrounding 2^dim coord sets which will all be used for interpolation
# (e.g. corresponding to the 4 points in 2D that surround the point to be interpolated,
# or to the 8 points in 3D, etc ...)
surround_coord_sets = []
surround_inputs = []
for i in range(2 ** dim):
bits = bitfield(i)
bits = [0] * (dim - len(bits)) + bits
offsets = K.variable(np.array(bits),
name="spatial_transform/bilinear_surround_offsets")
offsets = K.reshape(offsets, shape=[1, -1] + [1] * dim)
surround_coord_set = coords + offsets
surround_coord_sets.append(surround_coord_set)
# sample for each of the surrounding points before interpolating
surround_input = sample(inputs, surround_coord_set, dim, wrapped=wrap)
surround_inputs.append(surround_input)
# Bilinear interpolation, this part of the kernel lets the gradients flow through the
# coords as well as the inputs
products = list()
for coords_set, surround_input in zip(surround_coord_sets, surround_inputs):
if K.backend() == "tensorflow":
import tensorflow as tf
# shape N, width, height, ...
product = tf.reduce_prod(1 - tf.abs(coords_set - coords_float), axis=1)
else:
import theano.tensor as T
product = T.prod(1 - T.abs(coords_set - coords_float), axis=1)
# shape: (N, width, height, ..., n_channels)
product = surround_input * K.expand_dims(product, -1)
products.append(product)
return sum(products)
def interpolate_gaussian(coords, inputs, dim, wrap=False, kernel_size=None, kernel_step=None,
stddev=2.0):
"""
interpolate_gaussian - samples with coords from inputs, interpolating the results via a
differentiable gaussian kernel.
:param coords
shape: (N, dim, width, height, ...)
:param inputs
shape: (N, width, height, .. n_chan)
:param dim - dimensionality of the data, e.g. 2 if inputs is a batch of images
:param wrap - whether to wrap, or otherwise clip during the interpolation
:returns - the sampled result
:shape (N, width, height, ..., n_chan), where width, height, ... come from the
coords shape
"""
if not wrap:
print("Clipping is not supported for the gaussian kernel yet")
raise NotImplementedError
if K.backend() != "tensorflow":
print("Theano backend is currently not supported for the gaussian kernel")
raise NotImplementedError
inputs_shape = K.shape(inputs)
inputs_shape_list = [inputs_shape[i] for i in range(dim + 2)]
coords_shape = K.shape(coords)
coords_shape_list = [coords_shape[i] for i in range(dim + 2)]
inputs_dims = inputs_shape_list[1:-1]
maxes = K.cast(inputs_shape[1:-1] - 1, "float32")
coords_float = upscale(coords, maxes, dim)
import tensorflow as tf
from tensorflow.contrib.distributions import Normal
if not kernel_step or not kernel_size:
kernel_step = 1
# tile the float coords, extending them for the application of the gaussian aggregation later
extended_coords = tf.reshape(coords_float, coords_shape_list + [1] * dim)
if kernel_size:
m = kernel_size // kernel_step + (1 if kernel_size % kernel_step != 0 else 0)
extended_coords = tf.tile(
extended_coords, [1] * len(coords_shape_list) + [m] * dim)
else:
extended_coords = tf.tile(extended_coords, [1] * len(coords_shape_list) + inputs_dims)
# center a gaussian at each of the unstandardized transformed coordinates
coord_gaussians = Normal(loc=extended_coords, scale=stddev)
# shape: (N, dim, width, height, ..., img_width, img_height, ...)
for i in range(dim):
# create ranges for each of the dimensions to "spread" the coords across the image
if kernel_size:
m = kernel_size // kernel_step + (1 if kernel_size % kernel_step != 0 else 0)
limit = kernel_size
else:
m = inputs_dims[i]
limit = inputs_dims[i]
range_offset = tf.cast(tf.range(start=0, limit=limit, delta=kernel_step), "float32")
range_offset -= tf.cast((limit - 1.0) / 2.0, "float32")
# reshape so that the offset is broadcastet in all dimensions but the
# one for the current dimension
broadcast_shape = [1] * len(coords_shape_list) + i * [1] + \
[m] + (dim - i - 1) * [1]
# shape: (1, 1, 1, 1, ..., img_width, img_height, ...)
range_offset = tf.reshape(range_offset, broadcast_shape)
zero_pads = [tf.zeros_like(range_offset) for _ in range(dim - 1)]
# concatenate zeros for the rest of the dimensions
range_offset = tf.concat(zero_pads[:i] + [range_offset] + zero_pads[i + 1:], axis=1)
range_offset = tf.cast(range_offset, "float32")
extended_coords += range_offset
# now round and then sample
sampling_coords = tf.floor(extended_coords)
# double the dim as those coords are extended
samples = sample(inputs, sampling_coords, dim=dim * 2, wrapped=True)
# since the gaussians are isotropic, I have to reduce a product along the dim-dimension first
# TODO: this needs to be the meshgrid with image size, and not the scaled up coords
coord_gaussian_pdfs = coord_gaussians.prob(extended_coords)
coord_gaussian_pdfs = tf.reduce_prod(coord_gaussian_pdfs, axis=1)
# expand one broadcastable dimension for the image channels
coord_gaussian_pdfs = tf.expand_dims(coord_gaussian_pdfs, -1)
samples = samples * coord_gaussian_pdfs
# normalize the samples so that the weighting does not change the pixel intensities
reduction_indices = [i for i in range(dim + 1, 2 * dim + 1)]
norm_coeff = tf.reduce_sum(coord_gaussian_pdfs, keep_dims=True,
reduction_indices=reduction_indices)
samples /= norm_coeff
# reduce_sum along the img_width, img_height, ... etc. axes
samples = tf.reduce_sum(samples, reduction_indices=reduction_indices)
return samples
def interpolate_nearest(coords, inputs, dim, wrap=False):
"""
CAUTION: This interpolation kernel is not differentiable. Use only if you do not need
gradients flowing back throught he localization network.
interpolate_nearest - samples with coords from inputs, interpolating the results
via nearest neighbours rounding of the indices (which are not whole numbers yet).
:param coords
shape: (N, dim, width, height, ...)
:param inputs
shape: (N, width, height, .. n_chan)
:param dim - dimensionality of the data, e.g. 2 if inputs is a batch of images
:param wrap - whether to wrap, or otherwise clip during the interpolation
:returns - the sampled result
:shape (N, width, height, ..., n_chan), where width, height, ... come from the
coords shape
"""
inputs_shape = K.shape(inputs)
maxes = K.cast(inputs_shape[1:-1] - 1, "float32")
coords = upscale(coords, maxes, dim)
coords = K.round(coords)
return sample(inputs, coords, dim, wrapped=wrap)
class SpatialTransform(Layer):
"""
SpatialTransformer layer, which can automatically predict the parameters of a spatial
transformation that is then applied to the input.
    :param output_grid_shape - desired shape of the output image / volume / ..., without the channels
e.g. (width, height) or (width, height, depth) for 3D volumes
:param loc_network - neural network that will produce the transformation parameters
:param grid_transform_fn - function that interprets the parameters in the output of
loc_network as a transformation of image coordinates and applies it
:param interpolation_fn - function that samples the image with interpolation
:param wrap - whether to wrap, or otherwise clip during the interpolation
:param **kwargs
"""
def __init__(self, output_grid_shape,
loc_network,
grid_transform_fn,
interpolation_fn=interpolate_bilinear,
wrap=False,
**kwargs):
self.output_grid_shape = output_grid_shape
self.loc_network = loc_network
self.grid_transform_fn = grid_transform_fn
self.interpolation_fn = interpolation_fn
self.wrap = wrap
# initialize the coords grid
indices = np.indices(self.output_grid_shape, dtype="float32")
self.coords_grid = K.variable(indices, name="spatial_transform/grid_indices")
super(SpatialTransform, self).__init__(**kwargs)
def build(self, input_shape):
if isinstance(self.loc_network, Layer) or isinstance(self.loc_network, Model):
if hasattr(self, 'previous'):
self.loc_network.set_previous(self.previous)
self.loc_network.build(input_shape)
self.trainable_weights = self.loc_network.trainable_weights
# add regularization losses
            for loss in self.loc_network.losses:
self.add_loss(loss)
self.input = self.loc_network.input
super(SpatialTransform, self).build(input_shape)
def call(self, x):
params = self.loc_network(x)
# dimensionality of the data is all dims without batch_size and channels
dim = len(K.int_shape(x)) - 2
transformed_coords = self.grid_transform_fn(coords_grid=self.coords_grid,
params=params,
dim=dim)
transformed_image = self.interpolation_fn(transformed_coords, x, dim=dim, wrap=self.wrap)
return transformed_image
def compute_output_shape(self, input_shape):
# add the channels dimension
return (input_shape[0],) + self.output_grid_shape + (input_shape[-1],)
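# Editor's note: a minimal, hypothetical usage sketch (layer sizes are assumed, not
# part of the original module). A small localization network predicts the
# dim^2 + dim = 6 parameters of a 2D affine transform, and the layer resamples its
# input onto a 32x32 output grid:
#
#     from keras.layers import Input, Flatten, Dense
#     inp = Input(shape=(64, 64, 1))
#     params = Dense(6)(Dense(32, activation="relu")(Flatten()(inp)))
#     loc_net = Model(inputs=inp, outputs=params)
#     st = SpatialTransform(output_grid_shape=(32, 32),
#                           loc_network=loc_net,
#                           grid_transform_fn=affine_transform)
#     out = st(inp)   # shape: (None, 32, 32, 1)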
|
taimir/keras-layers
|
klayers/transform/spatial_transform.py
|
Python
|
mit
| 20,259
|
[
"Gaussian"
] |
01266de11955f38dddae0172969a4021a5e2f82034dcb9c23ecf4c6b4e5eb873
|
"""
Octave (and Matlab) code printer
The `OctaveCodePrinter` converts SymPy expressions into Octave expressions.
It uses a subset of the Octave language for Matlab compatibility.
A complete code generator, which uses `octave_code` extensively, can be found
in `sympy.utilities.codegen`. The `codegen` module can be used to generate
complete source code files.
"""
from __future__ import print_function, division
from sympy.core import Mul, Pow, S, Rational
from sympy.core.compatibility import string_types, range
from sympy.core.mul import _keep_coeff
from sympy.codegen.ast import Assignment
from sympy.printing.codeprinter import CodePrinter
from sympy.printing.precedence import precedence
from re import search
# List of known functions. First, those that have the same name in
# SymPy and Octave. This is almost certainly incomplete!
known_fcns_src1 = ["sin", "cos", "tan", "cot", "sec", "csc",
"asin", "acos", "acot", "atan", "atan2", "asec", "acsc",
"sinh", "cosh", "tanh", "coth", "csch", "sech",
"asinh", "acosh", "atanh", "acoth", "asech", "acsch",
"erfc", "erfi", "erf", "erfinv", "erfcinv",
"besseli", "besselj", "besselk", "bessely",
"exp", "factorial", "floor", "fresnelc", "fresnels",
"gamma", "log", "polylog", "sign", "zeta"]
# These functions have different names ("Sympy": "Octave"), more
# generally a mapping to (argument_conditions, octave_function).
known_fcns_src2 = {
"Abs": "abs",
"ceiling": "ceil",
"Chi": "coshint",
"Ci": "cosint",
"conjugate": "conj",
"DiracDelta": "dirac",
"Heaviside": "heaviside",
"laguerre": "laguerreL",
"li": "logint",
"loggamma": "gammaln",
"polygamma": "psi",
"Shi": "sinhint",
"Si": "sinint",
}
class OctaveCodePrinter(CodePrinter):
"""
A printer to convert expressions to strings of Octave/Matlab code.
"""
printmethod = "_octave"
language = "Octave"
_operators = {
'and': '&',
'or': '|',
'not': '~',
}
_default_settings = {
'order': None,
'full_prec': 'auto',
'precision': 16,
'user_functions': {},
'human': True,
'contract': True,
'inline': True,
}
# Note: contract is for expressing tensors as loops (if True), or just
    # assignment (if False). FIXME: this should be looked at more carefully
# for Octave.
def __init__(self, settings={}):
super(OctaveCodePrinter, self).__init__(settings)
self.known_functions = dict(zip(known_fcns_src1, known_fcns_src1))
self.known_functions.update(dict(known_fcns_src2))
userfuncs = settings.get('user_functions', {})
self.known_functions.update(userfuncs)
def _rate_index_position(self, p):
return p*5
def _get_statement(self, codestring):
return "%s;" % codestring
def _get_comment(self, text):
return "% {0}".format(text)
def _declare_number_const(self, name, value):
return "{0} = {1};".format(name, value)
def _format_code(self, lines):
return self.indent_code(lines)
def _traverse_matrix_indices(self, mat):
# Octave uses Fortran order (column-major)
rows, cols = mat.shape
return ((i, j) for j in range(cols) for i in range(rows))
def _get_loop_opening_ending(self, indices):
open_lines = []
close_lines = []
for i in indices:
# Octave arrays start at 1 and end at dimension
var, start, stop = map(self._print,
[i.label, i.lower + 1, i.upper + 1])
open_lines.append("for %s = %s:%s" % (var, start, stop))
close_lines.append("end")
return open_lines, close_lines
def _print_Mul(self, expr):
# print complex numbers nicely in Octave
if (expr.is_number and expr.is_imaginary and
expr.as_coeff_Mul()[0].is_integer):
return "%si" % self._print(-S.ImaginaryUnit*expr)
# cribbed from str.py
prec = precedence(expr)
c, e = expr.as_coeff_Mul()
if c < 0:
expr = _keep_coeff(-c, e)
sign = "-"
else:
sign = ""
a = [] # items in the numerator
b = [] # items that are in the denominator (if any)
if self.order not in ('old', 'none'):
args = expr.as_ordered_factors()
else:
# use make_args in case expr was something like -x -> x
args = Mul.make_args(expr)
# Gather args for numerator/denominator
for item in args:
if (item.is_commutative and item.is_Pow and item.exp.is_Rational
and item.exp.is_negative):
if item.exp != -1:
b.append(Pow(item.base, -item.exp, evaluate=False))
else:
b.append(Pow(item.base, -item.exp))
elif item.is_Rational and item is not S.Infinity:
if item.p != 1:
a.append(Rational(item.p))
if item.q != 1:
b.append(Rational(item.q))
else:
a.append(item)
a = a or [S.One]
a_str = [self.parenthesize(x, prec) for x in a]
b_str = [self.parenthesize(x, prec) for x in b]
# from here it differs from str.py to deal with "*" and ".*"
def multjoin(a, a_str):
# here we probably are assuming the constants will come first
r = a_str[0]
for i in range(1, len(a)):
mulsym = '*' if a[i-1].is_number else '.*'
r = r + mulsym + a_str[i]
return r
if len(b) == 0:
return sign + multjoin(a, a_str)
elif len(b) == 1:
divsym = '/' if b[0].is_number else './'
return sign + multjoin(a, a_str) + divsym + b_str[0]
else:
divsym = '/' if all([bi.is_number for bi in b]) else './'
return (sign + multjoin(a, a_str) +
divsym + "(%s)" % multjoin(b, b_str))
def _print_Pow(self, expr):
powsymbol = '^' if all([x.is_number for x in expr.args]) else '.^'
PREC = precedence(expr)
if expr.exp == S.Half:
return "sqrt(%s)" % self._print(expr.base)
if expr.is_commutative:
if expr.exp == -S.Half:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "sqrt(%s)" % self._print(expr.base)
if expr.exp == -S.One:
sym = '/' if expr.base.is_number else './'
return "1" + sym + "%s" % self.parenthesize(expr.base, PREC)
return '%s%s%s' % (self.parenthesize(expr.base, PREC), powsymbol,
self.parenthesize(expr.exp, PREC))
def _print_MatPow(self, expr):
PREC = precedence(expr)
return '%s^%s' % (self.parenthesize(expr.base, PREC),
self.parenthesize(expr.exp, PREC))
def _print_Pi(self, expr):
return 'pi'
def _print_ImaginaryUnit(self, expr):
return "1i"
def _print_Exp1(self, expr):
return "exp(1)"
def _print_GoldenRatio(self, expr):
# FIXME: how to do better, e.g., for octave_code(2*GoldenRatio)?
#return self._print((1+sqrt(S(5)))/2)
return "(1+sqrt(5))/2"
def _print_NumberSymbol(self, expr):
if self._settings["inline"]:
return self._print(expr.evalf(self._settings["precision"]))
else:
# assign to a variable, perhaps more readable for longer program
return super(OctaveCodePrinter, self)._print_NumberSymbol(expr)
def _print_Assignment(self, expr):
from sympy.functions.elementary.piecewise import Piecewise
from sympy.tensor.indexed import IndexedBase
# Copied from codeprinter, but remove special MatrixSymbol treatment
lhs = expr.lhs
rhs = expr.rhs
# We special case assignments that take multiple lines
if not self._settings["inline"] and isinstance(expr.rhs, Piecewise):
# Here we modify Piecewise so each expression is now
# an Assignment, and then continue on the print.
expressions = []
conditions = []
for (e, c) in rhs.args:
expressions.append(Assignment(lhs, e))
conditions.append(c)
temp = Piecewise(*zip(expressions, conditions))
return self._print(temp)
if self._settings["contract"] and (lhs.has(IndexedBase) or
rhs.has(IndexedBase)):
# Here we check if there is looping to be done, and if so
# print the required loops.
return self._doprint_loops(rhs, lhs)
else:
lhs_code = self._print(lhs)
rhs_code = self._print(rhs)
return self._get_statement("%s = %s" % (lhs_code, rhs_code))
def _print_Infinity(self, expr):
return 'inf'
def _print_NegativeInfinity(self, expr):
return '-inf'
def _print_NaN(self, expr):
return 'NaN'
def _print_list(self, expr):
return '{' + ', '.join(self._print(a) for a in expr) + '}'
_print_tuple = _print_list
_print_Tuple = _print_list
def _print_BooleanTrue(self, expr):
return "true"
def _print_BooleanFalse(self, expr):
return "false"
def _print_bool(self, expr):
return str(expr).lower()
# Could generate quadrature code for definite Integrals?
#_print_Integral = _print_not_supported
def _print_MatrixBase(self, A):
# Handle zero dimensions:
if (A.rows, A.cols) == (0, 0):
return '[]'
elif A.rows == 0 or A.cols == 0:
return 'zeros(%s, %s)' % (A.rows, A.cols)
elif (A.rows, A.cols) == (1, 1):
# Octave does not distinguish between scalars and 1x1 matrices
return self._print(A[0, 0])
elif A.rows == 1:
return "[%s]" % A.table(self, rowstart='', rowend='', colsep=' ')
elif A.cols == 1:
# note .table would unnecessarily equispace the rows
return "[%s]" % "; ".join([self._print(a) for a in A])
return "[%s]" % A.table(self, rowstart='', rowend='',
rowsep=';\n', colsep=' ')
def _print_SparseMatrix(self, A):
from sympy.matrices import Matrix
L = A.col_list();
# make row vectors of the indices and entries
I = Matrix([[k[0] + 1 for k in L]])
J = Matrix([[k[1] + 1 for k in L]])
AIJ = Matrix([[k[2] for k in L]])
return "sparse(%s, %s, %s, %s, %s)" % (self._print(I), self._print(J),
self._print(AIJ), A.rows, A.cols)
# FIXME: Str/CodePrinter could define each of these to call the _print
# method from higher up the class hierarchy (see _print_NumberSymbol).
# Then subclasses like us would not need to repeat all this.
_print_Matrix = \
_print_DenseMatrix = \
_print_MutableDenseMatrix = \
_print_ImmutableMatrix = \
_print_ImmutableDenseMatrix = \
_print_MatrixBase
_print_MutableSparseMatrix = \
_print_ImmutableSparseMatrix = \
_print_SparseMatrix
def _print_MatrixElement(self, expr):
return self._print(expr.parent) + '(%s, %s)'%(expr.i+1, expr.j+1)
def _print_MatrixSlice(self, expr):
def strslice(x, lim):
l = x[0] + 1
h = x[1]
step = x[2]
lstr = self._print(l)
hstr = 'end' if h == lim else self._print(h)
if step == 1:
if l == 1 and h == lim:
return ':'
if l == h:
return lstr
else:
return lstr + ':' + hstr
else:
return ':'.join((lstr, self._print(step), hstr))
return (self._print(expr.parent) + '(' +
strslice(expr.rowslice, expr.parent.shape[0]) + ', ' +
strslice(expr.colslice, expr.parent.shape[1]) + ')')
def _print_Indexed(self, expr):
inds = [ self._print(i) for i in expr.indices ]
return "%s(%s)" % (self._print(expr.base.label), ", ".join(inds))
def _print_Idx(self, expr):
return self._print(expr.label)
def _print_Identity(self, expr):
return "eye(%s)" % self._print(expr.shape[0])
def _print_uppergamma(self, expr):
return "gammainc(%s, %s, 'upper')" % (self._print(expr.args[1]),
self._print(expr.args[0]))
def _print_lowergamma(self, expr):
return "gammainc(%s, %s, 'lower')" % (self._print(expr.args[1]),
self._print(expr.args[0]))
def _print_sinc(self, expr):
#Note: divide by pi because Octave implements the normalized sinc function.
return "sinc(%s)" % self._print(expr.args[0]/S.Pi)
def _print_hankel1(self, expr):
return "besselh(%s, 1, %s)" % (self._print(expr.order),
self._print(expr.argument))
def _print_hankel2(self, expr):
return "besselh(%s, 2, %s)" % (self._print(expr.order),
self._print(expr.argument))
# Note: as of 2015, Octave doesn't have spherical Bessel functions
def _print_jn(self, expr):
from sympy.functions import sqrt, besselj
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*besselj(expr.order + S.Half, x)
return self._print(expr2)
def _print_yn(self, expr):
from sympy.functions import sqrt, bessely
x = expr.argument
expr2 = sqrt(S.Pi/(2*x))*bessely(expr.order + S.Half, x)
return self._print(expr2)
def _print_airyai(self, expr):
return "airy(0, %s)" % self._print(expr.args[0])
def _print_airyaiprime(self, expr):
return "airy(1, %s)" % self._print(expr.args[0])
def _print_airybi(self, expr):
return "airy(2, %s)" % self._print(expr.args[0])
def _print_airybiprime(self, expr):
return "airy(3, %s)" % self._print(expr.args[0])
def _print_LambertW(self, expr):
# argument order is reversed
args = ", ".join([self._print(x) for x in reversed(expr.args)])
return "lambertw(" + args + ")"
def _print_Piecewise(self, expr):
if expr.args[-1].cond != True:
# We need the last conditional to be a True, otherwise the resulting
# function may not return a result.
raise ValueError("All Piecewise expressions must contain an "
"(expr, True) statement to be used as a default "
"condition. Without one, the generated "
"expression may not evaluate to anything under "
"some condition.")
lines = []
if self._settings["inline"]:
# Express each (cond, expr) pair in a nested Horner form:
# (condition) .* (expr) + (not cond) .* (<others>)
# Expressions that result in multiple statements won't work here.
ecpairs = ["({0}).*({1}) + (~({0})).*(".format
(self._print(c), self._print(e))
for e, c in expr.args[:-1]]
elast = "%s" % self._print(expr.args[-1].expr)
pw = " ...\n".join(ecpairs) + elast + ")"*len(ecpairs)
# Note: we currently need these outer brackets for 2*pw. Would be
# nicer to teach parenthesize() to do this for us when needed!
return "(" + pw + ")"
else:
for i, (e, c) in enumerate(expr.args):
if i == 0:
lines.append("if (%s)" % self._print(c))
elif i == len(expr.args) - 1 and c == True:
lines.append("else")
else:
lines.append("elseif (%s)" % self._print(c))
code0 = self._print(e)
lines.append(code0)
if i == len(expr.args) - 1:
lines.append("end")
return "\n".join(lines)
def indent_code(self, code):
"""Accepts a string of code or a list of code lines"""
# code mostly copied from ccode
if isinstance(code, string_types):
code_lines = self.indent_code(code.splitlines(True))
return ''.join(code_lines)
tab = " "
inc_regex = ('^function ', '^if ', '^elseif ', '^else$', '^for ')
dec_regex = ('^end$', '^elseif ', '^else$')
# pre-strip left-space from the code
code = [ line.lstrip(' \t') for line in code ]
increase = [ int(any([search(re, line) for re in inc_regex]))
for line in code ]
decrease = [ int(any([search(re, line) for re in dec_regex]))
for line in code ]
pretty = []
level = 0
for n, line in enumerate(code):
if line == '' or line == '\n':
pretty.append(line)
continue
level -= decrease[n]
pretty.append("%s%s" % (tab*level, line))
level += increase[n]
return pretty
def octave_code(expr, assign_to=None, **settings):
r"""Converts `expr` to a string of Octave (or Matlab) code.
The string uses a subset of the Octave language for Matlab compatibility.
Parameters
==========
expr : Expr
A sympy expression to be converted.
assign_to : optional
When given, the argument is used as the name of the variable to which
the expression is assigned. Can be a string, ``Symbol``,
``MatrixSymbol``, or ``Indexed`` type. This can be helpful for
expressions that generate multi-line statements.
precision : integer, optional
The precision for numbers such as pi [default=16].
user_functions : dict, optional
A dictionary where keys are ``FunctionClass`` instances and values are
their string representations. Alternatively, the dictionary value can
be a list of tuples i.e. [(argument_test, cfunction_string)]. See
below for examples.
human : bool, optional
If True, the result is a single string that may contain some constant
declarations for the number symbols. If False, the same information is
returned in a tuple of (symbols_to_declare, not_supported_functions,
code_text). [default=True].
contract: bool, optional
If True, ``Indexed`` instances are assumed to obey tensor contraction
rules and the corresponding nested loops over indices are generated.
Setting contract=False will not generate loops, instead the user is
responsible to provide values for the indices in the code.
[default=True].
inline: bool, optional
If True, we try to create single-statement code instead of multiple
statements. [default=True].
Examples
========
>>> from sympy import octave_code, symbols, sin, pi
>>> x = symbols('x')
>>> octave_code(sin(x).series(x).removeO())
'x.^5/120 - x.^3/6 + x'
>>> from sympy import Rational, ceiling, Abs
>>> x, y, tau = symbols("x, y, tau")
>>> octave_code((2*tau)**Rational(7, 2))
'8*sqrt(2)*tau.^(7/2)'
Note that element-wise (Hadamard) operations are used by default between
symbols. This is because it's very common in Octave to write "vectorized"
code. It is harmless if the values are scalars.
>>> octave_code(sin(pi*x*y), assign_to="s")
's = sin(pi*x.*y);'
If you need a matrix product "*" or matrix power "^", you can specify the
symbol as a ``MatrixSymbol``.
>>> from sympy import Symbol, MatrixSymbol
>>> n = Symbol('n', integer=True, positive=True)
>>> A = MatrixSymbol('A', n, n)
>>> octave_code(3*pi*A**3)
'(3*pi)*A^3'
This class uses several rules to decide which symbol to use for a product.
Pure numbers use "*", Symbols use ".*" and MatrixSymbols use "*".
A HadamardProduct can be used to specify componentwise multiplication ".*"
of two MatrixSymbols. There is currently no easy way to specify
scalar symbols, so sometimes the code might have some minor cosmetic
issues. For example, suppose x and y are scalars and A is a Matrix, then
while a human programmer might write "(x^2*y)*A^3", we generate:
>>> octave_code(x**2*y*A**3)
'(x.^2.*y)*A^3'
Matrices are supported using Octave inline notation. When using
``assign_to`` with matrices, the name can be specified either as a string
or as a ``MatrixSymbol``. The dimensions must align in the latter case.
>>> from sympy import Matrix, MatrixSymbol
>>> mat = Matrix([[x**2, sin(x), ceiling(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 sin(x) ceil(x)];'
``Piecewise`` expressions are implemented with logical masking by default.
Alternatively, you can pass "inline=False" to use if-else conditionals.
Note that if the ``Piecewise`` lacks a default term, represented by
``(expr, True)``, then an error will be thrown. This is to prevent
generating an expression that may not evaluate to anything.
>>> from sympy import Piecewise
>>> pw = Piecewise((x + 1, x > 0), (x, True))
>>> octave_code(pw, assign_to=tau)
'tau = ((x > 0).*(x + 1) + (~(x > 0)).*(x));'
Note that any expression that can be generated normally can also exist
inside a Matrix:
>>> mat = Matrix([[x**2, pw, sin(x)]])
>>> octave_code(mat, assign_to='A')
'A = [x.^2 ((x > 0).*(x + 1) + (~(x > 0)).*(x)) sin(x)];'
Custom printing can be defined for certain types by passing a dictionary of
"type" : "function" to the ``user_functions`` kwarg. Alternatively, the
dictionary value can be a list of tuples i.e., [(argument_test,
cfunction_string)]. This can be used to call a custom Octave function.
>>> from sympy import Function
>>> f = Function('f')
>>> g = Function('g')
>>> custom_functions = {
... "f": "existing_octave_fcn",
... "g": [(lambda x: x.is_Matrix, "my_mat_fcn"),
... (lambda x: not x.is_Matrix, "my_fcn")]
... }
>>> mat = Matrix([[1, x]])
>>> octave_code(f(x) + g(x) + g(mat), user_functions=custom_functions)
'existing_octave_fcn(x) + my_fcn(x) + my_mat_fcn([1 x])'
Support for loops is provided through ``Indexed`` types. With
``contract=True`` these expressions will be turned into loops, whereas
``contract=False`` will just print the assignment expression that should be
looped over:
>>> from sympy import Eq, IndexedBase, Idx, ccode
>>> len_y = 5
>>> y = IndexedBase('y', shape=(len_y,))
>>> t = IndexedBase('t', shape=(len_y,))
>>> Dy = IndexedBase('Dy', shape=(len_y-1,))
>>> i = Idx('i', len_y-1)
>>> e = Eq(Dy[i], (y[i+1]-y[i])/(t[i+1]-t[i]))
>>> octave_code(e.rhs, assign_to=e.lhs, contract=False)
'Dy(i) = (y(i + 1) - y(i))./(t(i + 1) - t(i));'
"""
return OctaveCodePrinter(settings).doprint(expr, assign_to)
def print_octave_code(expr, **settings):
"""Prints the Octave (or Matlab) representation of the given expression.
See `octave_code` for the meaning of the optional arguments.
"""
print(octave_code(expr, **settings))
|
NikNitro/Python-iBeacon-Scan
|
sympy/printing/octave.py
|
Python
|
gpl-3.0
| 23,630
|
[
"DIRAC"
] |
1d6a9a560f0b3b1cdfe357425702141c850cab108f3c430537db9f0bcaa17102
|
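Since octave_code is the public entry point to the printer defined above, a short usage sketch may help; it exercises the user_functions hook described in the docstring (the Octave-side function name is an illustrative assumption):
# Hedged sketch: mapping a SymPy Function to a custom Octave function name
# through the user_functions hook of octave_code (requires SymPy).
from sympy import Function, octave_code, symbols
x = symbols('x')
f = Function('f')
# "my_octave_fcn" is an assumed, illustrative Octave-side name.
print(octave_code(f(x) + x**2, user_functions={"f": "my_octave_fcn"}))
# e.g. 'x.^2 + my_octave_fcn(x)' (term ordering may vary)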
# This module provides classes that represent graphics objects to be
# output to VMD. This module is as compatible as possible with module
# VRML. Important differences:
# - No general polygon objects.
# - Only the 'diffuse color' attribute of materials is used for rendering.
# Warning: loading cubes into VMD is very slow, as each cube is represented
# by 12 individual triangles.
#
# Written by: Konrad Hinsen <hinsen@cnrs-orleans.fr>
# Last revision: 2004-9-29
#
"""This module provides definitions of simple 3D graphics objects and
scenes containing them, in a form that can be fed to the molecular
visualization program VMD. Scenes can either be written as VMD script
files, or visualized directly by running VMD.
There are a few attributes that are common to all graphics objects:
material -- a Material object defining color and surface properties
comment -- a comment string that will be written to the VMD script file
reuse -- a boolean flag (defaulting to false). If set to one,
the object may share its VRML definition with other
objects. This reduces the size of the VRML file, but
can yield surprising side effects in some cases.
This module is almost compatible with the modules VRML and VRML2, which
provide visualization by VRML browsers. There are no Polygon objects,
and the only material attribute supported is diffuse_color. Note
also that loading a scene with many cubes into VMD is very slow, because
each cube is represented by 12 individual triangles.
Example:
>>> from VMD import *
>>> scene = Scene([])
>>> scale = ColorScale(10.)
>>> for x in range(11):
...     color = scale(x)
...     scene.addObject(Cube(Vector(x, 0., 0.), 0.2,
...                          material=Material(diffuse_color = color)))
>>> scene.view()
"""
from Scientific.IO.TextFile import TextFile
from Scientific.Geometry import Transformation, Vector, VectorModule
import Numeric
import copy, os, string, sys, tempfile   # copy is needed by VMDObject.__copy__
from Color import *
#
# VMD file
#
class SceneFile:
def __init__(self, filename, mode = 'r', scale = 1., delete = 0):
if mode == 'r':
raise TypeError, 'Not yet implemented.'
self.file = TextFile(filename, 'w')
self.memo = {}
self.delete = delete
self.scale = scale
self.filename = filename
self.writeString('proc python_graphics {} {\n')
self.writeString('mol new\n')
self.writeString('graphics 0 color 35\n')
def __del__(self):
self.close()
def writeString(self, data):
self.file.write(data)
def writeVector(self, v):
self.writeString(" {%g %g %g}" % tuple(v))
def close(self):
if self.file is not None:
self.writeString('}\npython_graphics\n')
self.writeString('display resetview\n')
if self.delete:
self.writeString('file delete ' + self.filename)
self.file.close()
self.file = None
def write(self, object):
object.writeToFile(self)
#
# Scene
#
class Scene:
"""VMD scene
A VMD scene is a collection of graphics objects that can be
written to a VMD script file or fed directly to VMD.
Constructor: Scene(|objects|=None, **|options|)
Arguments:
|objects| -- a list of graphics objects or 'None' for an empty scene
|options| -- options as keyword arguments. The only option available
is "scale", whose value must be a positive number which
specifies a scale factor applied to all coordinates of
geometrical objects *except* for molecule objects, which
cannot be scaled.
"""
def __init__(self, objects=None, **options):
if objects is None:
self.objects = []
elif type(objects) == type([]):
self.objects = objects
else:
self.objects = [objects]
try:
self.scale = options['scale']
except KeyError:
self.scale = 1.
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
def addObject(self, object):
"Adds |object| to the list of graphics objects."
self.objects.append(object)
def writeToFile(self, filename, delete = 0):
"Writes the scene to a VRML file with name |filename|."
file = SceneFile(filename, 'w', self.scale, delete)
for o in self.objects:
o.writeToFile(file)
file.close()
def view(self, *args):
"Start VMD for the scene."
filename = tempfile.mktemp()
self.writeToFile(filename, 1)
if sys.platform == 'win32':
#Unless VMD (or a batch file for it) is on the path
#(which is not done by their default install) we must
#specify the path in full, which by default is
#C:\Program Files\University of Illinois\VMD\vmd.exe
#
#Note that on non-English versions of Windows,
#the name "Program Files" does change. I believe
#there is an API call to ask for it, but
#there is also an Environment Variable:
program_files = 'C:\\Program Files'
if os.environ.has_key('PROGRAMFILES') :
program_files = os.environ['PROGRAMFILES']
vmd_exe = os.path.join(program_files, 'University of Illinois',
'VMD','vmd.exe')
#Check that vmd.exe does exist at this point, otherwise
#will get a path not found error
if os.path.exists(vmd_exe) :
#Because the program path has spaces, it must be quoted.
#The filename MAY have spaces, so quote that too.
#
#Is the pipe stuff ( 1> /dev/null 2>&1 ) doing anything
#important? Leaving it off makes it work...
#
#os.system('"' + vmd_exe + '" -nt -e "' + filename + '"')
#os.system can work, but there are two problems:
# * it gives me grief with spaces in filenames
# (even if they are quoted)
# * its a blocking function, unlike the VRML, VRML2
# and VPython visualisations which don't pause Python
import win32api
win32api.WinExec('"' + vmd_exe + '" -nt -e "' + filename + '"')
else :
print "Error - could not find VMD, tried:"
print vmd_exe
else:
os.system('vmd -e ' + filename + ' 1> /dev/null 2>&1')
#
# Base class for everything that produces graphic objects
#
class VMDObject:
def __init__(self, attr):
self.attr = {}
for key, value in attr.items():
if key in self.attribute_names:
self.attr[key] = value
else:
raise AttributeError, 'illegal attribute: ' + str(key)
attribute_names = ['comment']
def __getitem__(self, attr):
try:
return self.attr[attr]
except KeyError:
return None
def __setitem__(self, attr, value):
self.attr[attr] = value
def __copy__(self):
return copy.deepcopy(self)
def writeToFile(self, file):
raise AttributeError, 'Class ' + self.__class__.__name__ + \
' does not implement file output.'
#
# Molecules (via PDB)
#
class Molecules(VMDObject):
"""Molecules from a PDB file
Constructor: Molecules(|pdb_file|)
"""
def __init__(self, object, **attr):
VMDObject.__init__(self, attr)
self.object = object
def writeToFile(self, file):
comment = self['comment']
if comment is not None:
file.writeString('# ' + comment + '\n')
if type(self.object) == type(''):
file.writeString('mol load pdb ' + self.object + '\n')
else:
tempdir = tempfile.tempdir
tempfile.tempdir = os.path.split(file.filename)[0]
filename = tempfile.mktemp()+'.pdb'
tempfile.tempdir = tempdir
self.object.writeToFile(filename)
file.writeString('mol load pdb ' + filename + '\n')
if file.delete:
file.writeString('file delete ' + filename + '\n')
#
# Shapes
#
class ShapeObject(VMDObject):
def __init__(self, attr):
VMDObject.__init__(self, attr)
attribute_names = VMDObject.attribute_names + ['material']
def __add__(self, other):
return Group([self]) + Group([other])
def writeToFile(self, file):
comment = self['comment']
if comment is not None:
file.writeString('# ' + comment + '\n')
material = self['material']
if material is not None:
material.writeToFile(file)
self.writeSpecification(file)
def use(self, file):
pass
class Sphere(ShapeObject):
"""Sphere
Constructor: Sphere(|center|, |radius|, **|attributes|)
Arguments:
|center| -- the center of the sphere (a vector)
|radius| -- the sphere radius (a positive number)
|attributes| -- any graphics object attribute
"""
def __init__(self, center, radius, **attr):
self.radius = radius
self.center = center
ShapeObject.__init__(self, attr)
def writeSpecification(self, file):
file.writeString('graphics 0 sphere')
file.writeVector(self.center*file.scale)
file.writeString(' radius ' + `self.radius*file.scale` + '\n')
class Cube(ShapeObject):
"""Cube
Constructor: Cube(|center|, |edge|, **|attributes|)
Arguments:
|center| -- the center of the cube (a vector)
|edge| -- the length of an edge (a positive number)
|attributes| -- any graphics object attribute
The edges of a cube are always parallel to the coordinate axes.
"""
def __init__(self, center, edge, **attr):
self.edge = edge
self.center = center
ShapeObject.__init__(self, attr)
def writeSpecification(self, file):
d = 0.5*self.edge
for ext1, ext2 in [(VectorModule.ex, VectorModule.ey),
(VectorModule.ey, VectorModule.ez),
(VectorModule.ez, VectorModule.ex)]:
norm = ext1.cross(ext2)
for offset in [-1, 1]:
p1 = d*(offset*norm-ext1-ext2)+self.center
p2 = d*(offset*norm-ext1+ext2)+self.center
p3 = d*(offset*norm+ext1-ext2)+self.center
p4 = d*(offset*norm+ext1+ext2)+self.center
file.writeString('graphics 0 triangle')
file.writeVector(p1*file.scale)
file.writeVector(p2*file.scale)
file.writeVector(p3*file.scale)
file.writeString('\n')
file.writeString('graphics 0 triangle')
file.writeVector(p2*file.scale)
file.writeVector(p3*file.scale)
file.writeVector(p4*file.scale)
file.writeString('\n')
class Cylinder(ShapeObject):
"""Cylinder
Constructor: Cylinder(|point1|, |point2|, |radius|,
|faces|='(1, 1, 1)', **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the cylinder axis (vectors)
|radius| -- the radius (a positive number)
|attributes| -- any graphics object attribute
|faces| -- a sequence of three boolean flags, corresponding to
the cylinder hull and the two circular end pieces,
specifying for each of these parts whether it is visible
or not.
"""
def __init__(self, point1, point2, radius, faces = (1, 1, 1), **attr):
self.faces = faces
self.radius = radius
self.point1 = point1
self.point2 = point2
ShapeObject.__init__(self, attr)
def writeSpecification(self, file):
file.writeString('graphics 0 cylinder')
file.writeVector(self.point1*file.scale)
file.writeVector(self.point2*file.scale)
file.writeString(' radius ' + `self.radius*file.scale`)
if self.faces[:2] == (1, 1):
file.writeString(' filled yes')
file.writeString('\n')
class Cone(ShapeObject):
"""Cone
Constructor: Cone(|point1|, |point2|, |radius|, |face|='1', **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the cylinder axis (vectors).
|point1| is the tip of the cone.
|radius| -- the radius (a positive number)
|attributes| -- any graphics object attribute
|face| -- a boolean flag, specifying if the circular bottom is visible
"""
def __init__(self, point1, point2, radius, face = 1, **attr):
self.face = face
self.radius = radius
self.point1 = point1
self.point2 = point2
ShapeObject.__init__(self, attr)
def writeSpecification(self, file):
file.writeString('graphics 0 cone')
file.writeVector(self.point2*file.scale)
file.writeVector(self.point1*file.scale)
file.writeString(' radius ' + `self.radius*file.scale` +
' resolution 12\n')
class Line(ShapeObject):
"""Line
Constructor: Line(|point1|, |point2|, **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the line (vectors)
|attributes| -- any graphics object attribute
"""
def __init__(self, point1, point2, **attr):
self.point1 = point1
self.point2 = point2
ShapeObject.__init__(self, attr)
def writeSpecification(self, file):
file.writeString('graphics 0 line')
file.writeVector(self.point1*file.scale)
file.writeVector(self.point2*file.scale)
file.writeString('\n')
#
# Groups
#
class Group:
def __init__(self, objects, **attr):
self.objects = []
for o in objects:
if isGroup(o):
self.objects = self.objects + o.objects
else:
self.objects.append(o)
for key, value in attr.items():
for o in self.objects:
o[key] = value
is_group = 1
def __len__(self):
return len(self.objects)
def __getitem__(self, item):
return self.objects[item]
def __coerce__(self, other):
if not isGroup(other):
other = Group([other])
return (self, other)
def __add__(self, other):
return Group(self.objects + other.objects)
def writeToFile(self, file):
for o in self.objects:
o.writeToFile(file)
def isGroup(x):
return hasattr(x, 'is_group')
#
# Composite Objects
#
class Arrow(Group):
"""Arrow
An arrow consists of a cylinder and a cone.
Constructor: Arrow(|point1|, |point2|, |radius|, **|attributes|)
Arguments:
|point1|, |point2| -- the end points of the arrow (vectors).
|point2| defines the tip of the arrow.
|radius| -- the radius of the arrow shaft (a positive number)
|attributes| -- any graphics object attribute
"""
def __init__(self, point1, point2, radius, **attr):
axis = point2-point1
height = axis.length()
axis = axis/height
cone_height = min(height, 4.*radius)
cylinder_height = height - cone_height
junction = point2-axis*cone_height
cone = apply(Cone, (point2, junction, 0.75*cone_height), attr)
objects = [cone]
if cylinder_height > 0.005*radius:
cylinder = apply(Cylinder, (point1, junction, radius), attr)
objects.append(cylinder)
Group.__init__(self, objects)
#
# Materials
#
class Material(VMDObject):
"""Material for graphics objects
A material defines the color and surface properties of an object.
Constructor: Material(**|attributes|)
The accepted attributes are "ambient_color", "diffuse_color",
"specular_color", "emissive_color", "shininess", and "transparency".
Only "diffuse_color" is used, the others are permitted for compatibility
with the VRML modules.
"""
def __init__(self, **attr):
VMDObject.__init__(self, attr)
attribute_names = VMDObject.attribute_names + \
['ambient_color', 'diffuse_color', 'specular_color',
'emissive_color', 'shininess', 'transparency']
def writeToFile(self, file):
try:
last = file.memo['material']
if last == self: return
except KeyError: pass
try:
color = self.attr['diffuse_color']
except KeyError:
color = Color((1., 1., 1.))
file.writeString('color change rgb 35 ' + str(color) + '\n')
file.memo['material'] = self
#
# Predefined materials
#
def DiffuseMaterial(color):
"Returns a material with the 'diffuse color' attribute set to |color|."
if type(color) is type(''):
color = ColorByName(color)
try:
return diffuse_material_dict[color]
except KeyError:
m = Material(diffuse_color = color)
diffuse_material_dict[color] = m
return m
diffuse_material_dict = {}
EmissiveMaterial = DiffuseMaterial
#
# Test code
#
if __name__ == '__main__':
if 0:
spheres = DiffuseMaterial('green')
links = DiffuseMaterial('red')
s1 = Sphere(VectorModule.null, 0.05, material = spheres)
s2 = Sphere(VectorModule.ex, 0.05, material = spheres)
s3 = Sphere(VectorModule.ey, 0.05, material = spheres)
s4 = Sphere(VectorModule.ez, 0.05, material = spheres)
a1 = Arrow(VectorModule.null, VectorModule.ex, 0.01, material = links)
a2 = Arrow(VectorModule.null, VectorModule.ey, 0.01, material = links)
a3 = Arrow(VectorModule.null, VectorModule.ez, 0.01, material = links)
scene = Scene([s1, s2, s3, s4, a1, a2, a3])
scene.view()
if 0:
scene = Scene([])
scale = SymmetricColorScale(10., 10)
for x in range(-10, 11):
color = scale(x)
m = Material(diffuse_color = color)
scene.addObject(Cube(Vector(x,0.,0.), 0.2, material=m))
scene.view()
if 1:
scene = Scene([])
scale = ColorScale(10.)
for x in range(11):
color = scale(x)
m = Material(diffuse_color = color)
scene.addObject(Cube(Vector(x,0.,0.), 0.2, material=m))
scene.view()
|
OS2World/DEV-PYTHON-UTIL-ScientificPython
|
src/Lib/site-packages/Scientific/Visualization/VMD.py
|
Python
|
isc
| 16,889
|
[
"VMD"
] |
63eec6b90613e4ccf849ebc70be8219758781310316b5e8036321ce51f89e808
|
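As a complement to the doctest in the module docstring, the sketch below builds a tiny scene from the classes defined above and writes it to a VMD script file instead of launching VMD directly (Python 2 and ScientificPython are assumed to be installed):
# Hedged sketch (Python 2, ScientificPython required): write a small scene to
# a VMD script for later loading with "vmd -e axes.vmd".
from Scientific.Visualization.VMD import Scene, Sphere, Arrow, DiffuseMaterial
from Scientific.Geometry import Vector
scene = Scene([])
scene.addObject(Sphere(Vector(0., 0., 0.), 0.1, material=DiffuseMaterial('green')))
scene.addObject(Arrow(Vector(0., 0., 0.), Vector(1., 0., 0.), 0.02,
                      material=DiffuseMaterial('red')))
scene.writeToFile('axes.vmd')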
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import random
from datetime import timedelta
from dateutil.relativedelta import relativedelta
from collections import namedtuple
import numpy as np
import pandas as pd
from faker import Faker
from tqdm import tqdm
PatientRecord = namedtuple(
"PatientRecord",
"subjectcode subjectage subjectvisitid subjectvisitdate "
"alzheimerbroadcategory apoe4 dataset",
)
def alzheimer_oracle(apoe4):
prob = None
if apoe4 == 0:
prob = 0.8
elif apoe4 == 1:
prob = 0.5
elif apoe4 == 2:
prob = 0.24
r = np.random.random()
return True if r <= prob else False
def get_age(birth_date, visit):
age = relativedelta(visit, birth_date)
age = age.years + age.months / 12
return age
def get_visits(fake, birth_date):
num_visits = random.randint(3, 15)
first_visit = fake.date_between(
start_date=birth_date + timedelta(days=64 * 365),
end_date=birth_date + timedelta(days=65 * 365),
)
visits = sorted(
[
fake.date_between(
start_date=first_visit, end_date=first_visit + timedelta(days=3 * 365)
)
for _ in range(num_visits)
]
)
return visits
def patients():
fake = Faker()
subject_code = fake.md5()
birth_date = fake.date_of_birth(minimum_age=75, maximum_age=90)
visits = get_visits(fake, birth_date)
apoe4 = np.random.choice([0, 1, 2], p=[0.03, 0.17, 0.8])
can_get_sick = alzheimer_oracle(apoe4)
alzheimerbroadcategory = "MCI"
for i, visit in enumerate(visits):
visit_id = fake.md5()
age = get_age(birth_date, visit)
if can_get_sick and age > 64 and alzheimerbroadcategory == "MCI":
prob = 2 ** ((age - 6) // 2) * 0.3
r = np.random.random()
if r <= prob:
alzheimerbroadcategory = "AD"
yield PatientRecord(
subject_code,
age,
visit_id,
visit.strftime("%Y-%m-%d") + " 0:00",
alzheimerbroadcategory,
apoe4,
"alzheimer_fake_cohort",
)
# if random.random() < 0.01:
# break
def cohort(num_patients):
with tqdm(total=num_patients, desc="Generating fake cohort") as pbar:
for _ in range(num_patients):
for visit in patients():
yield visit
pbar.update(1)
def main():
num_patients = 2000
data = pd.DataFrame(cohort(num_patients))
final = [g[1].iloc[-1].alzheimerbroadcategory for g in data.groupby("subjectcode")]
print(sum(1 for f in final if f == "AD") / num_patients)
data = data.set_index("subjectcode")
data.to_csv("alzheimer_fake_cohort.csv")
if __name__ == "__main__":
main()
|
madgik/exareme
|
Exareme-Docker/src/mip-algorithms/KAPLAN_MEIER/generate_fake_cohort.py
|
Python
|
mit
| 2,865
|
[
"VisIt"
] |
d3e46bd428031d7ed049d547909535655f403c4dc83c7ec739da09a8e1b98eee
|
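A small sketch of how the generators above could be sampled interactively; the module name is inferred from the file path and the printed fraction depends on the random seed:
# Hedged sketch: draw a tiny fake cohort with the generators above and report
# the fraction of subjects whose final visit is labelled "AD".
import pandas as pd
from generate_fake_cohort import cohort   # module name assumed from the path above
data = pd.DataFrame(cohort(50))
last_visits = data.groupby("subjectcode").tail(1)
print((last_visits.alzheimerbroadcategory == "AD").mean())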
try:
from paraview.vtk import vtkFiltersVerdict
from paraview.vtk import vtkFiltersGeneral
from paraview.vtk import vtkCommonTransforms
from paraview.vtk import vtkFiltersGeometry
from paraview.vtk import vtkFiltersExtraction
except:
import vtk as vtkFiltersVerdict
import vtk as vtkFiltersExtraction
import vtk as vtkFiltersGeneral
import vtk as vtkCommonTransforms
import vtk as vtkFiltersGeometry
try :from paraview import numpy_support
except: from vtk.util import numpy_support
import numpy
from UVParametrizationFilter import UVParametrization as UVParametrisation
from objets import ObjetPyturbo
from calculs import CalculettePyturbo
from fonctions_basiques import *
#__________________________________________________________________________________________
class Extraction(ObjetPyturbo):
"""permet d'extraire une surface quelconque
les surface possibles sont
- i= ;j= ; k= si l'entree est compose de vtkStructuredGrid (mono ou multiblock)
- toute grandeur calculable par une CalculettePyturbo
utiliser coordx, coordy et coordz
- toute autre grandeur calculable par calculs.CalculettePyturbo
indiquez la formule dans formule_extraction SANS ESPACES
coordx+coordy=12. par exemple
imin, imax etc. sont utilisables
#ToDo
completer la fonction pour pouvoir prendre une inegalite
"""
#_____________________________________________________________________________________
def __init__(self, input=None, formule_extraction=None, calculer_vecteur_normal=True,
normals_aux_cellules=False, axe=None):
# initialise the parent class
attributs = locals().copy()
del attributs['self']
ObjetPyturbo.__init__(self, **attributs)
# class-specific initialisation
self._mettre_a_jour = True
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def set(self, nom_attribut, valeur):
"""fonction set specifique
gere la variable locale _changement
qui sert lorsque l'on appelle la sortie
a savoir s'il faut regenerer la coupe
"""
setattr(self, nom_attribut, valeur)
if nom_attribut != '_mettre_a_jour':
self._mettre_a_jour = True
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def __couper_bloc__(self, vtkDataSet):
"""retourne un filtre vtk adapte a la coupe desiree
il suffit ensuite de faire GetOutput() pour obtenir le resultat de la coupe
ne s'applique PAS a un multiblockdataset
"""
# INITIAL CHECKS
if isinstance(vtkDataSet, vtk.vtkMultiBlockDataSet):
raise IOError, '__couper_bloc__ ne prend PAS de MultiBlockDataSet en entree'
if self.formule_extraction is None:
raise IOError, "indiquez d'abord la self.formule_extraction pour l'extraction"
if ' ' in self.formule_extraction:
raise IOError, "la formule_extraction doit etre indiquee sans espaces"
if not '=' in self.formule_extraction:
raise IOError, "pour l'instant, seules les equations sont supportees comme formule d'extraction"
else:
cle_coupe = self.formule_extraction.split('=')[0].strip()
valeur_coupe = self.formule_extraction.split('=')[1].strip()
# EXECUTION
if cle_coupe in ['i', 'j', 'k']:
if not isinstance(vtkDataSet, vtk.vtkStructuredGrid):
raise IOError, "une coupe i, j ou k est demande, mais l'entree n'est pas un bloc structure"
filtre_vtk = vtkFiltersExtraction.vtkExtractGrid()
vtk_set_input(filtre_vtk, vtkDataSet)
extent_vtkDataSet = list(vtkDataSet.GetExtent())
exec "valeur_coupe = {0}".format(valeur_coupe.replace(
'imax', str(extent_vtkDataSet[1])).replace(
'jmax', str(extent_vtkDataSet[3])).replace(
'kmax', str(extent_vtkDataSet[5])).replace(
'imin', str(extent_vtkDataSet[0])).replace(
'jmin', str(extent_vtkDataSet[2])).replace(
'kmin', str(extent_vtkDataSet[4])))
voi = extent_vtkDataSet[0] if cle_coupe != 'i' else valeur_coupe, \
extent_vtkDataSet[1] if cle_coupe != 'i' else valeur_coupe, \
extent_vtkDataSet[2] if cle_coupe != 'j' else valeur_coupe, \
extent_vtkDataSet[3] if cle_coupe != 'j' else valeur_coupe, \
extent_vtkDataSet[4] if cle_coupe != 'k' else valeur_coupe, \
extent_vtkDataSet[5] if cle_coupe != 'k' else valeur_coupe
filtre_vtk.SetVOI(voi)
filtre_vtk.Update()
data = filtre_vtk.GetOutput()
else:
exec "valeur_coupe = float({0})".format(valeur_coupe)
calculette = CalculettePyturbo(input = vtkDataSet, axe = self.axe) if self.axe is not None else CalculettePyturbo(input = vtkDataSet)
calculette.set('a_calculer', cle_coupe)
a_couper = calculette.get_output()
a_couper = set_scalaires_actifs(
input = a_couper, loc = 'points', array_name = cle_coupe)
filtre_vtk = vtk.vtkContourFilter()
filtre_vtk.SetComputeNormals(0)
vtk_set_input(filtre_vtk, a_couper)
filtre_vtk.SetValue(0, valeur_coupe)
filtre_vtk.Update()
data = filtre_vtk.GetOutput()
# COMPUTE THE NORMAL VECTOR
if self.calculer_vecteur_normal == 1:
data = calculer_vecteur_normal(data, self.normals_aux_cellules)
return data
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def update(self):
"""genere la coupe
"""
if self.input is None:
raise IOError, "indiquez d'abord l'objet vtk en entree"
if isinstance(self.input, vtk.vtkMultiBlockDataSet):
self.output = vtk_new_instance(self.input)
for numbloc in get_numeros_blocs_non_vides(self.input):
extraction_bloc = self.__couper_bloc__(self.input.GetBlock(numbloc))
if extraction_bloc.GetNumberOfPoints() != 0:
self.output.SetBlock(numbloc, extraction_bloc)
else:
self.output = self.__couper_bloc__(self.input)
self._mettre_a_jour = False
#_____________________________________________________________________________________
#_____________________________________________________________________________________
def get_output(self):
"""retourne la sortie de la classe
mise a jour effectuee si necessaire
"""
if self._mettre_a_jour:
self.update()
return self.output
#_____________________________________________________________________________________
#_____________________________________________________________________________________
|
aurmarsan/pyturbo
|
extractions.py
|
Python
|
mit
| 7,599
|
[
"ParaView",
"VTK"
] |
84b38bac4441c7d5810869af17bcce4812e3d9a01594b58e8715d66af490f9a7
|
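Typical use of the Extraction class above might look like the following; the multi-block grid object is a placeholder and the call assumes VTK and the pyturbo helper modules are importable (Python 2):
# Hedged sketch (Python 2, VTK + pyturbo required): extract the j=jmax surface
# of a structured multi-block grid and fetch the resulting surface data.
extraction = Extraction(input=multiblock_grid,        # a vtkMultiBlockDataSet (placeholder)
                        formule_extraction='j=jmax',  # no spaces, as the docstring requires
                        calculer_vecteur_normal=True)
surface = extraction.get_output()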
from os.path import basename
from os.path import join
from os.path import dirname
from os import sep
from ..util import PathHelper
COMMAND_VERSION_FILENAME = "COMMAND_VERSION"
class ClientJobDescription(object):
""" A description of how client views job - command_line, inputs, etc..
**Parameters**
command_line : str
The local command line to execute, this will be rewritten for
the remote server.
config_files : list
List of Galaxy 'configfile's produced for this job. These will
be rewritten and sent to remote server.
input_files : list
List of input files used by job. These will be transferred and
references rewritten.
client_outputs : ClientOutputs
Description of outputs produced by job (at least output files along
with optional version string and working directory outputs).
tool_dir : str
Directory containing tool to execute (if a wrapper is used, it will
be transferred to remote server).
working_directory : str
Local path created by Galaxy for running this job.
dependencies_description : list
galaxy.tools.deps.dependencies.DependencyDescription object describing
tool dependency context for remote dependency resolution.
env: list
List of dict object describing environment variables to populate.
version_file : str
Path to version file expected on the client server
arbitrary_files : dict()
Additional non-input, non-tool, non-config, non-working directory files
to transfer before staging job. This is most likely data indices but
can be anything. For now these are copied into staging working
directory but this will be reworked to find a better, more robust
location.
rewrite_paths : boolean
Indicates whether paths should be rewritten in job inputs (command_line
and config files) while staging files.
"""
def __init__(
self,
tool,
command_line,
config_files,
input_files,
client_outputs,
working_directory,
dependencies_description=None,
env=[],
arbitrary_files=None,
rewrite_paths=True,
):
self.tool = tool
self.command_line = command_line
self.config_files = config_files
self.input_files = input_files
self.client_outputs = client_outputs
self.working_directory = working_directory
self.dependencies_description = dependencies_description
self.env = env
self.rewrite_paths = rewrite_paths
self.arbitrary_files = arbitrary_files or {}
@property
def output_files(self):
return self.client_outputs.output_files
@property
def version_file(self):
return self.client_outputs.version_file
@property
def tool_dependencies(self):
if not self.remote_dependency_resolution:
return None
return dict(
requirements=(self.tool.requirements or []),
installed_tool_dependencies=(self.tool.installed_tool_dependencies or [])
)
class ClientOutputs(object):
""" Abstraction describing the output datasets EXPECTED by the Galaxy job
runner client.
"""
def __init__(self, working_directory, output_files, work_dir_outputs=None, version_file=None):
self.working_directory = working_directory
self.work_dir_outputs = work_dir_outputs
self.output_files = output_files
self.version_file = version_file
def to_dict(self):
return dict(
working_directory=self.working_directory,
work_dir_outputs=self.work_dir_outputs,
output_files=self.output_files,
version_file=self.version_file
)
@staticmethod
def from_dict(config_dict):
return ClientOutputs(
working_directory=config_dict.get('working_directory'),
work_dir_outputs=config_dict.get('work_dir_outputs'),
output_files=config_dict.get('output_files'),
version_file=config_dict.get('version_file'),
)
class PulsarOutputs(object):
""" Abstraction describing the output files PRODUCED by the remote Pulsar
server. """
def __init__(self, working_directory_contents, output_directory_contents, remote_separator=sep):
self.working_directory_contents = working_directory_contents
self.output_directory_contents = output_directory_contents
self.path_helper = PathHelper(remote_separator)
@staticmethod
def from_status_response(complete_response):
# Default to None instead of [] to distinguish between empty contents and it not set
# by the Pulsar - older Pulsar instances will not set these in complete response.
working_directory_contents = complete_response.get("working_directory_contents")
output_directory_contents = complete_response.get("outputs_directory_contents")
# Older (pre-2014) Pulsar servers will not include separator in response,
# so this should only be used when reasoning about outputs in
# subdirectories (which was not previously supported prior to that).
remote_separator = complete_response.get("system_properties", {}).get("separator", sep)
return PulsarOutputs(
working_directory_contents,
output_directory_contents,
remote_separator
)
def has_output_file(self, output_file):
return basename(output_file) in self.output_directory_contents
def output_extras(self, output_file):
"""
Returns dict mapping local path to remote name.
"""
output_directory = dirname(output_file)
def local_path(name):
return join(output_directory, self.path_helper.local_name(name))
files_directory = "%s_files%s" % (basename(output_file)[0:-len(".dat")], self.path_helper.separator)
names = filter(lambda o: o.startswith(files_directory), self.output_directory_contents)
return dict(map(lambda name: (local_path(name), name), names))
|
jmchilton/pulsar
|
pulsar/client/staging/__init__.py
|
Python
|
apache-2.0
| 6,153
|
[
"Galaxy"
] |
50850db7c04d0ce12191234fe5a6f441aeb4064b9932f83c22c2726a3f9a9059
|
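The PulsarOutputs.output_extras helper above is easiest to follow with a hand-built instance; the file names are illustrative and a POSIX path separator is assumed on both sides:
# Hedged sketch: how PulsarOutputs maps remote "extra files" back to local
# paths next to the primary output (illustrative names, POSIX separators).
outputs = PulsarOutputs(
    working_directory_contents=[],
    output_directory_contents=["result.dat", "result_files/plot.png"],
)
print(outputs.has_output_file("/data/result.dat"))
# expected: True
print(outputs.output_extras("/data/result.dat"))
# expected: {'/data/result_files/plot.png': 'result_files/plot.png'}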
#
# @file TestModifierSpeciesReference.py
# @brief ModifierSpeciesReference unit tests
#
# @author Akiya Jouraku (Python conversion)
# @author Ben Bornstein
#
# $Id$
# $HeadURL$
#
# ====== WARNING ===== WARNING ===== WARNING ===== WARNING ===== WARNING ======
#
# DO NOT EDIT THIS FILE.
#
# This file was generated automatically by converting the file located at
# src/sbml/test/TestModifierSpeciesReference.c
# using the conversion program dev/utilities/translateTests/translateTests.pl.
# Any changes made here will be lost the next time the file is regenerated.
#
# -----------------------------------------------------------------------------
# This file is part of libSBML. Please visit http://sbml.org for more
# information about SBML, and the latest version of libSBML.
#
# Copyright 2005-2010 California Institute of Technology.
# Copyright 2002-2005 California Institute of Technology and
# Japan Science and Technology Corporation.
#
# This library is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation. A copy of the license agreement is provided
# in the file named "LICENSE.txt" included with this software distribution
# and also available online as http://sbml.org/software/libsbml/license.html
# -----------------------------------------------------------------------------
import sys
import unittest
import libsbml
class TestModifierSpeciesReference(unittest.TestCase):
global MSR
MSR = None
def setUp(self):
self.MSR = libsbml.ModifierSpeciesReference(2,4)
if (self.MSR == None):
pass
pass
def tearDown(self):
_dummyList = [ self.MSR ]; _dummyList[:] = []; del _dummyList
pass
def test_ModifierSpeciesReference_create(self):
self.assert_( self.MSR.getTypeCode() == libsbml.SBML_MODIFIER_SPECIES_REFERENCE )
self.assert_( self.MSR.getMetaId() == "" )
self.assert_( self.MSR.getNotes() == None )
self.assert_( self.MSR.getAnnotation() == None )
self.assert_( self.MSR.getSpecies() == "" )
self.assertEqual( False, self.MSR.isSetSpecies() )
self.assertEqual( True, self.MSR.isModifier() )
pass
def test_ModifierSpeciesReference_createWithNS(self):
xmlns = libsbml.XMLNamespaces()
xmlns.add( "http://www.sbml.org", "testsbml")
sbmlns = libsbml.SBMLNamespaces(2,1)
sbmlns.addNamespaces(xmlns)
object = libsbml.ModifierSpeciesReference(sbmlns)
self.assert_( object.getTypeCode() == libsbml.SBML_MODIFIER_SPECIES_REFERENCE )
self.assert_( object.getMetaId() == "" )
self.assert_( object.getNotes() == None )
self.assert_( object.getAnnotation() == None )
self.assert_( object.getLevel() == 2 )
self.assert_( object.getVersion() == 1 )
self.assert_( object.getNamespaces() != None )
n = object.getNamespaces()
self.assert_( n.getLength() == 2 )
_dummyList = [ object ]; _dummyList[:] = []; del _dummyList
pass
def test_ModifierSpeciesReference_free_NULL(self):
_dummyList = [ None ]; _dummyList[:] = []; del _dummyList
pass
def test_ModifierSpeciesReference_setSpecies(self):
species = "s1";
self.MSR.setSpecies(species)
s = self.MSR.getSpecies()
self.assert_(( species == s ))
self.assertEqual( True, self.MSR.isSetSpecies() )
if (self.MSR.getSpecies() == species):
pass
s = self.MSR.getSpecies()
self.MSR.setSpecies(s)
s = self.MSR.getSpecies()
self.assert_(( species == s ))
self.MSR.setSpecies("")
self.assertEqual( False, self.MSR.isSetSpecies() )
if (self.MSR.getSpecies() != None):
pass
pass
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestModifierSpeciesReference))
return suite
if __name__ == "__main__":
if unittest.TextTestRunner(verbosity=1).run(suite()).wasSuccessful() :
sys.exit(0)
else:
sys.exit(1)
|
alexholehouse/SBMLIntegrator
|
libsbml-5.0.0/src/bindings/python/test/sbml/TestModifierSpeciesReference.py
|
Python
|
gpl-3.0
| 3,969
|
[
"VisIt"
] |
ec0d2fdd648f0dd769f3c1a697042ce081ffa995e5646e3969addf33a1caee91
|
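Outside the unittest harness, the libSBML calls exercised by the test above reduce to a few lines (the python-libsbml bindings are assumed to be installed):
# Hedged sketch: the core libSBML calls covered by the test above.
import libsbml
msr = libsbml.ModifierSpeciesReference(2, 4)   # SBML Level 2, Version 4
msr.setSpecies("s1")
print(msr.isModifier(), msr.isSetSpecies(), msr.getSpecies())
# expected: True True s1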
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for compute resource tracking."""
import copy
import datetime
import uuid
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils import timeutils
import six
from nova.compute.monitors import base as monitor_base
from nova.compute import resource_tracker
from nova.compute import resources
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.pci import fakes as pci_fakes
from nova.tests import uuidsentinel
from nova.virt import driver
FAKE_VIRT_MEMORY_MB = 5
FAKE_VIRT_MEMORY_OVERHEAD = 1
FAKE_VIRT_MEMORY_WITH_OVERHEAD = (
FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD)
FAKE_VIRT_NUMA_TOPOLOGY = objects.NUMATopology(
cells=[objects.NUMACell(id=0, cpuset=set([1, 2]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([])),
objects.NUMACell(id=1, cpuset=set([3, 4]), memory=3072,
cpu_usage=0, memory_usage=0, mempages=[],
siblings=[], pinned_cpus=set([]))])
FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD = objects.NUMATopologyLimits(
cpu_allocation_ratio=2, ram_allocation_ratio=2)
ROOT_GB = 5
EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
class UnsupportedVirtDriver(driver.ComputeDriver):
"""Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
# no support for getting resource usage info
return {}
class FakeVirtDriver(driver.ComputeDriver):
def __init__(self, pci_support=False, stats=None,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY):
super(FakeVirtDriver, self).__init__(None)
self.memory_mb = FAKE_VIRT_MEMORY_MB
self.local_gb = FAKE_VIRT_LOCAL_GB
self.vcpus = FAKE_VIRT_VCPUS
self.numa_topology = numa_topology
self.memory_mb_used = 0
self.local_gb_used = 0
self.pci_support = pci_support
self.pci_devices = [
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.1',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.2',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
'parent_addr': '0000:00:01.0',
},
{
'label': 'label_8086_0443',
'dev_type': fields.PciDeviceType.SRIOV_PF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0443',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_0123',
'dev_type': 'type-PCI',
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '0123',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': 1,
},
{
'label': 'label_8086_7891',
'dev_type': fields.PciDeviceType.SRIOV_VF,
'compute_node_id': 1,
'address': '0000:00:01.0',
'product_id': '7891',
'vendor_id': '8086',
'status': 'available',
'extra_k1': 'v1',
'numa_node': None,
'parent_addr': '0000:08:01.0',
},
] if self.pci_support else []
self.pci_stats = [
{
'count': 2,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '0443',
'numa_node': 1,
'dev_type': fields.PciDeviceType.SRIOV_PF
},
{
'count': 1,
'vendor_id': '8086',
'product_id': '7891',
'numa_node': None,
'dev_type': fields.PciDeviceType.SRIOV_VF
},
] if self.pci_support else []
if stats is not None:
self.stats = stats
def get_host_ip_addr(self):
return '127.0.0.1'
def get_available_resource(self, nodename):
d = {
'vcpus': self.vcpus,
'memory_mb': self.memory_mb,
'local_gb': self.local_gb,
'vcpus_used': 0,
'memory_mb_used': self.memory_mb_used,
'local_gb_used': self.local_gb_used,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fakehost',
'cpu_info': '',
'numa_topology': (
self.numa_topology._to_json() if self.numa_topology else None),
}
if self.pci_support:
d['pci_passthrough_devices'] = jsonutils.dumps(self.pci_devices)
if hasattr(self, 'stats'):
d['stats'] = self.stats
return d
def estimate_instance_overhead(self, instance_info):
instance_info['memory_mb'] # make sure memory value is present
overhead = {
'memory_mb': FAKE_VIRT_MEMORY_OVERHEAD
}
return overhead # just return a constant value for testing
class BaseTestCase(test.TestCase):
@mock.patch('stevedore.enabled.EnabledExtensionManager')
def setUp(self, _mock_ext_mgr):
super(BaseTestCase, self).setUp()
self.flags(reserved_host_disk_mb=0,
reserved_host_memory_mb=0)
self.context = context.get_admin_context()
self._set_pci_passthrough_whitelist()
self.flags(use_local=True, group='conductor')
self.conductor = self.start_service('conductor',
manager=CONF.conductor.manager)
self._instances = {}
self._instance_types = {}
self.stubs.Set(objects.InstanceList, 'get_by_host_and_node',
self._fake_instance_get_by_host_and_node)
self.stubs.Set(self.conductor.db,
'flavor_get', self._fake_flavor_get)
self.host = 'fakehost'
self.compute = self._create_compute_node()
self.updated = False
self.deleted = False
self.update_call_count = 0
def _set_pci_passthrough_whitelist(self):
self.flags(pci_passthrough_whitelist=[
'{"vendor_id": "8086", "product_id": "0443"}',
'{"vendor_id": "8086", "product_id": "7891"}'])
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"uuid": uuidsentinel.fake_compute_node,
"service_id": 1,
"host": "fakehost",
"vcpus": 1,
"memory_mb": 1,
"local_gb": 1,
"vcpus_used": 1,
"memory_mb_used": 1,
"local_gb_used": 1,
"free_ram_mb": 1,
"free_disk_gb": 1,
"current_workload": 1,
"running_vms": 0,
"cpu_info": None,
"numa_topology": None,
"stats": '{"num_instances": "1"}',
"hypervisor_hostname": "fakenode",
'hypervisor_version': 1,
'hypervisor_type': 'fake-hyp',
'disk_available_least': None,
'host_ip': None,
'metrics': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'cpu_allocation_ratio': None,
'ram_allocation_ratio': None,
'disk_allocation_ratio': None,
}
if values:
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
service = {
"id": 1,
"host": host,
"binary": "nova-compute",
"topic": "compute",
"compute_node": compute,
"report_count": 0,
'disabled': False,
'disabled_reason': None,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'last_seen_up': None,
'forced_down': False,
'version': 0,
}
return service
def _fake_instance_obj(self, stash=True, flavor=None, **kwargs):
# Default to an instance ready to resize to or from the same
# instance_type
flavor = flavor or self._fake_flavor_create()
if not isinstance(flavor, objects.Flavor):
flavor = objects.Flavor(**flavor)
instance_uuid = str(uuid.uuid1())
instance = objects.Instance(context=self.context, uuid=instance_uuid,
flavor=flavor)
instance.update({
'vm_state': vm_states.RESIZED,
'task_state': None,
'ephemeral_key_uuid': None,
'os_type': 'Linux',
'project_id': '123456',
'host': None,
'node': None,
'instance_type_id': flavor['id'],
'memory_mb': flavor['memory_mb'],
'vcpus': flavor['vcpus'],
'root_gb': flavor['root_gb'],
'ephemeral_gb': flavor['ephemeral_gb'],
'launched_on': None,
'system_metadata': {},
'availability_zone': None,
'vm_mode': None,
'reservation_id': None,
'display_name': None,
'default_swap_device': None,
'power_state': None,
'access_ip_v6': None,
'access_ip_v4': None,
'key_name': None,
'updated_at': None,
'cell_name': None,
'locked': None,
'locked_by': None,
'launch_index': None,
'architecture': None,
'auto_disk_config': None,
'terminated_at': None,
'ramdisk_id': None,
'user_data': None,
'cleaned': None,
'deleted_at': None,
'id': 333,
'disable_terminate': None,
'hostname': None,
'display_description': None,
'key_data': None,
'deleted': None,
'default_ephemeral_device': None,
'progress': None,
'launched_at': None,
'config_drive': None,
'kernel_id': None,
'user_id': None,
'shutdown_terminate': None,
'created_at': None,
'image_ref': None,
'root_device_name': None,
})
if stash:
instance.old_flavor = flavor
instance.new_flavor = flavor
instance.numa_topology = kwargs.pop('numa_topology', None)
instance.update(kwargs)
self._instances[instance_uuid] = instance
return instance
def _fake_flavor_create(self, **kwargs):
instance_type = {
'id': 1,
'created_at': None,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'disabled': False,
'is_public': True,
'name': 'fakeitype',
'memory_mb': FAKE_VIRT_MEMORY_MB,
'vcpus': FAKE_VIRT_VCPUS,
'root_gb': ROOT_GB,
'ephemeral_gb': EPHEMERAL_GB,
'swap': 0,
'rxtx_factor': 1.0,
'vcpu_weight': 1,
'flavorid': 'fakeflavor',
'extra_specs': {},
}
instance_type.update(**kwargs)
instance_type = objects.Flavor(**instance_type)
id_ = instance_type['id']
self._instance_types[id_] = instance_type
return instance_type
def _fake_instance_get_by_host_and_node(self, context, host, nodename,
expected_attrs=None):
return objects.InstanceList(
objects=[i for i in self._instances.values() if i['host'] == host])
def _fake_flavor_get(self, ctxt, id_):
return self._instance_types[id_]
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _driver(self):
return FakeVirtDriver()
def _tracker(self, host=None):
if host is None:
host = self.host
node = "fakenode"
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
class UnsupportedDriverTestCase(BaseTestCase):
"""Resource tracking should be disabled when the virt driver doesn't
support it.
"""
def setUp(self):
super(UnsupportedDriverTestCase, self).setUp()
self.tracker = self._tracker()
# seed tracker with data:
self.tracker.update_available_resource(self.context)
def _driver(self):
return UnsupportedVirtDriver()
def test_disabled(self):
# disabled = no compute node stats
self.assertTrue(self.tracker.disabled)
self.assertIsNone(self.tracker.compute_node)
def test_disabled_claim(self):
# basic claim:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
def test_disabled_instance_claim(self):
# instance variation:
instance = self._fake_instance_obj()
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertEqual(0, claim.memory_mb)
@mock.patch('nova.objects.Instance.save')
def test_disabled_instance_context_claim(self, mock_save):
# instance context manager variation:
instance = self._fake_instance_obj()
self.tracker.instance_claim(self.context, instance)
with self.tracker.instance_claim(self.context, instance) as claim:
self.assertEqual(0, claim.memory_mb)
def test_disabled_updated_usage(self):
instance = self._fake_instance_obj(host='fakehost', memory_mb=5,
root_gb=10)
self.tracker.update_usage(self.context, instance)
def test_disabled_resize_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
claim = self.tracker.resize_claim(self.context, instance,
instance_type)
self.assertEqual(0, claim.memory_mb)
self.assertEqual(instance['uuid'], claim.migration['instance_uuid'])
self.assertEqual(instance_type['id'],
claim.migration['new_instance_type_id'])
def test_disabled_resize_context_claim(self):
instance = self._fake_instance_obj()
instance_type = self._fake_flavor_create()
with self.tracker.resize_claim(self.context, instance, instance_type) \
as claim:
self.assertEqual(0, claim.memory_mb)
class MissingComputeNodeTestCase(BaseTestCase):
def setUp(self):
super(MissingComputeNodeTestCase, self).setUp()
self.tracker = self._tracker()
self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_create',
self._fake_create_compute_node)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def _fake_create_compute_node(self, context, values):
self.created = True
return self._create_compute_node(values)
def _fake_service_get_by_compute_host(self, ctx, host):
# return a service with no joined compute
service = self._create_service()
return service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
# return no compute node
raise exception.ComputeHostNotFound(host=host)
def test_create_compute_node(self):
self.tracker.compute_node = None
self.tracker.update_available_resource(self.context)
self.assertTrue(self.created)
def test_enabled(self):
self.tracker.update_available_resource(self.context)
self.assertFalse(self.tracker.disabled)
class BaseTrackerTestCase(BaseTestCase):
def setUp(self):
# setup plumbing for a working resource tracker with required
# database models and a compatible compute driver:
super(BaseTrackerTestCase, self).setUp()
self.tracker = self._tracker()
self._migrations = {}
self.stub_out('nova.db.service_get_by_compute_host',
self._fake_service_get_by_compute_host)
self.stub_out('nova.db.compute_node_get_by_host_and_nodename',
self._fake_compute_node_get_by_host_and_nodename)
self.stub_out('nova.db.compute_node_update',
self._fake_compute_node_update)
self.stub_out('nova.db.compute_node_delete',
self._fake_compute_node_delete)
self.stub_out('nova.db.migration_update',
self._fake_migration_update)
self.stub_out('nova.db.migration_get_in_progress_by_host_and_node',
self._fake_migration_get_in_progress_by_host_and_node)
# Note that this must be called before the call to _init_tracker()
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self._init_tracker()
self.limits = self._limits()
def _fake_service_get_by_compute_host(self, ctx, host):
self.service = self._create_service(host, compute=self.compute)
return self.service
def _fake_compute_node_get_by_host_and_nodename(self, ctx, host, nodename):
self.compute = self._create_compute_node()
return self.compute
def _fake_compute_node_update(self, ctx, compute_node_id, values,
prune_stats=False):
self.update_call_count += 1
self.updated = True
self.compute.update(values)
return self.compute
def _fake_compute_node_delete(self, ctx, compute_node_id):
self.deleted = True
self.compute.update({'deleted': 1})
return self.compute
def _fake_migration_get_in_progress_by_host_and_node(self, ctxt, host,
node):
status = ['confirmed', 'reverted', 'error']
migrations = []
for migration in self._migrations.values():
migration = obj_base.obj_to_primitive(migration)
if migration['status'] in status:
continue
uuid = migration['instance_uuid']
migration['instance'] = self._instances[uuid]
migrations.append(migration)
return migrations
def _fake_migration_update(self, ctxt, migration_id, values):
# cheat and assume there's only 1 migration present
migration = list(self._migrations.values())[0]
migration.update(values)
return migration
def _init_tracker(self):
self.tracker.update_available_resource(self.context)
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_WITH_OVERHEAD,
disk_gb=FAKE_VIRT_LOCAL_GB,
vcpus=FAKE_VIRT_VCPUS,
numa_topology=FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD):
"""Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
'disk_gb': disk_gb,
'vcpu': vcpus,
'numa_topology': numa_topology,
}
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'memory', 'id', 'cpu_usage', 'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
tracker = self.tracker
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
value, objects.NUMATopology.obj_from_db_obj(x))
else:
self.assertEqual(value, x)
class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(len(expected),
len(self.tracker.compute_node.pci_device_pools))
for expected_pool, actual_pool in zip(
expected, self.tracker.compute_node.pci_device_pools):
self.assertEqual(expected_pool, actual_pool)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(inst)
mock_save.assert_called_once_with()
self.assertEqual(self.tracker.host, inst.host)
self.assertEqual(self.tracker.nodename, inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
def test_unset_instance_host_and_node(self):
inst = objects.Instance()
with mock.patch.object(inst, 'save') as mock_save:
self.tracker._set_instance_host_and_node(inst)
self.tracker._unset_instance_host_and_node(inst)
self.assertEqual(2, mock_save.call_count)
self.assertIsNone(inst.host)
self.assertIsNone(inst.node)
self.assertEqual(self.tracker.host, inst.launched_on)
class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
# NOTE(pmurray): we are not doing a full pass through the resource
# tracker's update path, so it is safest to do two updates and look
# for differences rather than to rely on the initial state being the
# same as an update
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
# NOTE(pmurray): we are not doing a full pass through the resource
# tracker's update path, so it is safest to do two updates and look
# for differences rather than to rely on the initial state being the
# same as an update
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
class TrackerPciStatsTestCase(BaseTrackerTestCase):
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
self.assertTrue(self.updated)
def test_init(self):
driver = self._driver()
self._assert(FAKE_VIRT_MEMORY_MB, 'memory_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'local_gb')
self._assert(FAKE_VIRT_VCPUS, 'vcpus')
self._assert(FAKE_VIRT_NUMA_TOPOLOGY, 'numa_topology')
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self._assert(0, 'running_vms')
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_empty_ext_resources(self):
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
fake_write_resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
def _instance_topology(self, mem):
mem = mem * 1024
return objects.InstanceNUMATopology(
cells=[objects.InstanceNUMACell(
id=0, cpuset=set([1]), memory=mem),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), memory=mem)])
def _claim_topology(self, mem, cpus=1):
if self.tracker.driver.numa_topology is None:
return None
mem = mem * 1024
return objects.NUMATopology(
cells=[objects.NUMACell(
id=0, cpuset=set([1, 2]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([])),
objects.NUMACell(
id=1, cpuset=set([3, 4]), memory=3072, cpu_usage=cpus,
memory_usage=mem, mempages=[], siblings=[],
pinned_cpus=set([]))])
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_instance_claim_with_oversubscription(self, mock_get):
memory_mb = FAKE_VIRT_MEMORY_MB * 2
root_gb = ephemeral_gb = FAKE_VIRT_LOCAL_GB
vcpus = FAKE_VIRT_VCPUS * 2
claim_topology = self._claim_topology(3)
instance_topology = self._instance_topology(3)
limits = {'memory_mb': memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
'disk_gb': root_gb * 2,
'vcpu': vcpus,
'numa_topology': FAKE_VIRT_NUMA_TOPOLOGY_OVERHEAD}
instance = self._fake_instance_obj(memory_mb=memory_mb,
root_gb=root_gb, ephemeral_gb=ephemeral_gb,
numa_topology=instance_topology)
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_additive_claims(self, mock_save, mock_get):
self.limits['vcpu'] = 2
claim_topology = self._claim_topology(2, cpus=2)
flavor = self._fake_flavor_create(
memory_mb=1, root_gb=1, ephemeral_gb=0)
instance_topology = self._instance_topology(1)
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
instance = self._fake_instance_obj(
flavor=flavor, numa_topology=instance_topology)
with self.tracker.instance_claim(self.context, instance, self.limits):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_context_claim_with_exception(self, mock_save, mock_get):
instance = self._fake_instance_obj(memory_mb=1, root_gb=1,
ephemeral_gb=1)
try:
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
raise test.TestingException()
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
FAKE_VIRT_NUMA_TOPOLOGY,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_update_load_stats_for_instance(self, mock_get):
instance = self._fake_instance_obj(task_state=task_states.SCHEDULING)
with mock.patch.object(instance, 'save'):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
# DELETED, but the DB record is not yet deleted.
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
migration = objects.Migration(context=self.context,
migration_type='resize',
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_instances_with_live_migrations(self, mock_instance_list,
mock_migration_list):
instance = self._fake_instance_obj()
migration = objects.Migration(context=self.context,
migration_type='live-migration',
instance_uuid=instance.uuid)
mock_migration_list.return_value = [migration]
mock_instance_list.return_value = [instance]
with mock.patch.object(self.tracker, '_pair_instances_to_migrations'
) as mock_pair:
self.tracker.update_available_resource(self.context)
self.assertTrue(mock_pair.called)
self.assertEqual(
instance.uuid,
mock_pair.call_args_list[0][0][0][0].instance_uuid)
self.assertEqual(instance.uuid,
mock_pair.call_args_list[0][0][1][0].uuid)
self.assertEqual(
['system_metadata', 'numa_topology', 'flavor',
'migration_context'],
mock_instance_list.call_args_list[0][1]['expected_attrs'])
self.assertEqual(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.assertEqual(ROOT_GB + EPHEMERAL_GB,
self.tracker.compute_node['local_gb_used'])
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
def test_pair_instances_to_migrations(self):
migrations = [objects.Migration(instance_uuid=uuidsentinel.instance1),
objects.Migration(instance_uuid=uuidsentinel.instance2)]
instances = [objects.Instance(uuid=uuidsentinel.instance2),
objects.Instance(uuid=uuidsentinel.instance1)]
self.tracker._pair_instances_to_migrations(migrations, instances)
order = [uuidsentinel.instance1, uuidsentinel.instance2]
for i, migration in enumerate(migrations):
self.assertEqual(order[i], migration.instance.uuid)
@mock.patch('nova.compute.claims.Claim')
@mock.patch('nova.objects.Instance.save')
def test_claim_saves_numa_topology(self, mock_save, mock_claim):
def fake_save():
self.assertEqual(set(['numa_topology', 'host', 'node',
'launched_on']),
inst.obj_what_changed())
mock_save.side_effect = fake_save
inst = objects.Instance(host=None, node=None, memory_mb=1024,
uuid=uuidsentinel.instance1)
inst.obj_reset_changes()
numa = objects.InstanceNUMATopology()
claim = mock.MagicMock()
claim.claimed_numa_topology = numa
mock_claim.return_value = claim
with mock.patch.object(self.tracker, '_update_usage_from_instance'):
self.tracker.instance_claim(self.context, inst)
mock_save.assert_called_once_with()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_claim_sets_instance_host_and_node(self, mock_get):
instance = self._fake_instance_obj()
self.assertIsNone(instance['host'])
self.assertIsNone(instance['launched_on'])
self.assertIsNone(instance['node'])
with mock.patch.object(instance, 'save'):
claim = self.tracker.instance_claim(self.context, instance)
self.assertNotEqual(0, claim.memory_mb)
self.assertEqual('fakehost', instance['host'])
self.assertEqual('fakehost', instance['launched_on'])
self.assertEqual('fakenode', instance['node'])
class _MoveClaimTestCase(BaseTrackerTestCase):
def setUp(self):
super(_MoveClaimTestCase, self).setUp()
self.instance = self._fake_instance_obj()
self.instance_type = self._fake_flavor_create()
self.claim_method = self.tracker._move_claim
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_additive_claims(self, mock_get, mock_save):
limits = self._limits(
2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
2 * FAKE_VIRT_LOCAL_GB,
2 * FAKE_VIRT_VCPUS)
self.claim_method(
self.context, self.instance, self.instance_type, limits=limits)
mock_save.assert_called_once_with()
mock_save.reset_mock()
instance2 = self._fake_instance_obj()
self.claim_method(
self.context, instance2, self.instance_type, limits=limits)
mock_save.assert_called_once_with()
self._assert(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD, 'memory_mb_used')
self._assert(2 * FAKE_VIRT_LOCAL_GB, 'local_gb_used')
self._assert(2 * FAKE_VIRT_VCPUS, 'vcpus_used')
@mock.patch('nova.objects.Instance.save')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
def test_move_type_not_tracked(self, mock_get, mock_save):
self.claim_method(self.context, self.instance, self.instance_type,
limits=self.limits, move_type="live-migration")
mock_save.assert_called_once_with()
self._assert(0, 'memory_mb_used')
self._assert(0, 'local_gb_used')
self._assert(0, 'vcpus_used')
self.assertEqual(0, len(self.tracker.tracked_migrations))
@mock.patch('nova.objects.Instance.save')
@mock.patch.object(objects.Migration, 'save')
def test_existing_migration(self, save_mock, save_inst_mock):
migration = objects.Migration(self.context, id=42,
instance_uuid=self.instance.uuid,
source_compute='fake-other-compute',
source_node='fake-other-node',
status='accepted',
migration_type='evacuation')
self.claim_method(self.context, self.instance, self.instance_type,
migration=migration)
self.assertEqual(self.tracker.host, migration.dest_compute)
self.assertEqual(self.tracker.nodename, migration.dest_node)
self.assertEqual("pre-migrating", migration.status)
self.assertEqual(1, len(self.tracker.tracked_migrations))
save_mock.assert_called_once_with()
save_inst_mock.assert_called_once_with()
class ResizeClaimTestCase(_MoveClaimTestCase):
def setUp(self):
super(ResizeClaimTestCase, self).setUp()
self.claim_method = self.tracker.resize_claim
def test_move_type_not_tracked(self):
self.skipTest("Resize_claim does already sets the move_type.")
def test_existing_migration(self):
self.skipTest("Resize_claim does not support having existing "
"migration record.")
class OrphanTestCase(BaseTrackerTestCase):
def _driver(self):
class OrphanVirtDriver(FakeVirtDriver):
def get_per_instance_usage(self):
return {
'1-2-3-4-5': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '1-2-3-4-5'},
'2-3-4-5-6': {'memory_mb': FAKE_VIRT_MEMORY_MB,
'uuid': '2-3-4-5-6'},
}
return OrphanVirtDriver()
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
self._fake_instance_obj()
orphans = self.tracker._find_orphaned_instances()
self.assertEqual(2, len(orphans))
class ComputeMonitorTestCase(BaseTestCase):
def setUp(self):
super(ComputeMonitorTestCase, self).setUp()
self.tracker = self._tracker()
self.node_name = 'nodename'
self.user_id = 'fake'
self.project_id = 'fake'
self.info = {}
self.context = context.RequestContext(self.user_id,
self.project_id)
def test_get_host_metrics_none(self):
self.tracker.monitors = []
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
self.assertEqual(len(metrics), 0)
@mock.patch.object(resource_tracker.LOG, 'warning')
def test_get_host_metrics_exception(self, mock_LOG_warning):
monitor = mock.MagicMock()
monitor.add_metrics_to_list.side_effect = Exception
self.tracker.monitors = [monitor]
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_LOG_warning.assert_called_once_with(
u'Cannot get the metrics from %(mon)s; error: %(exc)s', mock.ANY)
self.assertEqual(0, len(metrics))
def test_get_host_metrics(self):
class FakeCPUMonitor(monitor_base.MonitorBase):
NOW_TS = timeutils.utcnow()
def __init__(self, *args):
super(FakeCPUMonitor, self).__init__(*args)
self.source = 'FakeCPUMonitor'
def get_metric_names(self):
return set(["cpu.frequency"])
def get_metrics(self):
return [("cpu.frequency", 100, self.NOW_TS)]
self.tracker.monitors = [FakeCPUMonitor(None)]
mock_notifier = mock.Mock()
with mock.patch.object(rpc, 'get_notifier',
return_value=mock_notifier) as mock_get:
metrics = self.tracker._get_host_metrics(self.context,
self.node_name)
mock_get.assert_called_once_with(service='compute',
host=self.node_name)
expected_metrics = [
{
'timestamp': FakeCPUMonitor.NOW_TS.isoformat(),
'name': 'cpu.frequency',
'value': 100,
'source': 'FakeCPUMonitor'
},
]
payload = {
'metrics': expected_metrics,
'host': self.tracker.host,
'host_ip': CONF.my_ip,
'nodename': self.node_name
}
mock_notifier.info.assert_called_once_with(
self.context, 'compute.metrics.update', payload)
self.assertEqual(metrics, expected_metrics)
class TrackerPeriodicTestCase(BaseTrackerTestCase):
def test_periodic_status_update(self):
# verify update called on instantiation
self.assertEqual(1, self.update_call_count)
# verify update not called if no change to resources
self.tracker.update_available_resource(self.context)
self.assertEqual(1, self.update_call_count)
# verify update is called when resources change
driver = self.tracker.driver
driver.memory_mb += 1
self.tracker.update_available_resource(self.context)
self.assertEqual(2, self.update_call_count)
def test_update_available_resource_calls_locked_inner(self):
@mock.patch.object(self.tracker, 'driver')
@mock.patch.object(self.tracker,
'_update_available_resource')
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)
_test()
class StatsDictTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
stats as a dictionary.
"""
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def test_virt_stats(self):
# start with virt driver stats
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.items():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidTypeTestCase(BaseTrackerTestCase):
"""Test stats handling for a virt driver that provides
an invalid type for stats.
"""
def _driver(self):
return FakeVirtDriver(stats=10)
def _init_tracker(self):
# do not do initial update in setup
pass
def test_virt_stats(self):
# should throw exception for incorrect stats value type
self.assertRaises(ValueError,
self.tracker.update_available_resource,
context=self.context)
class UpdateUsageFromInstanceTestCase(BaseTrackerTestCase):
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage')
def test_building(self, mock_update_usage):
instance = self._fake_instance_obj()
instance.vm_state = vm_states.BUILDING
self.tracker._update_usage_from_instance(self.context, instance)
mock_update_usage.assert_called_once_with(instance, sign=1)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage')
def test_shelve_offloading(self, mock_update_usage):
instance = self._fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
self.tracker.tracked_instances = {}
self.tracker.tracked_instances[
instance.uuid] = obj_base.obj_to_primitive(instance)
self.tracker._update_usage_from_instance(self.context, instance)
mock_update_usage.assert_called_once_with(instance, sign=-1)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage')
def test_unshelving(self, mock_update_usage):
instance = self._fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
self.tracker._update_usage_from_instance(self.context, instance)
mock_update_usage.assert_called_once_with(instance, sign=1)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage')
def test_deleted(self, mock_update_usage):
instance = self._fake_instance_obj()
instance.vm_state = vm_states.DELETED
self.tracker.tracked_instances = {}
self.tracker.tracked_instances[
instance.uuid] = obj_base.obj_to_primitive(instance)
self.tracker._update_usage_from_instance(self.context, instance, True)
mock_update_usage.assert_called_once_with(instance, sign=-1)
class UpdateUsageFromMigrationsTestCase(BaseTrackerTestCase):
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
def test_no_migrations(self, mock_update_usage):
migrations = []
self.tracker._update_usage_from_migrations(self.context, migrations)
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_instance_not_found(self, mock_get_instance, mock_update_usage):
mock_get_instance.side_effect = exception.InstanceNotFound(
instance_id='some_id',
)
migration = objects.Migration(
context=self.context,
instance_uuid='some_uuid',
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, 'some_uuid')
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_update_usage_called(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_flavor_not_found(self, mock_get_instance, mock_update_usage):
mock_update_usage.side_effect = exception.FlavorNotFound(flavor_id='')
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_not_resizing_state(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
instance.vm_state = vm_states.ACTIVE
instance.task_state = task_states.SUSPENDING
mock_get_instance.return_value = instance
migration = objects.Migration(
context=self.context,
instance_uuid=instance.uuid,
)
self.tracker._update_usage_from_migrations(self.context, [migration])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
self.assertFalse(mock_update_usage.called)
@mock.patch.object(resource_tracker.ResourceTracker,
'_update_usage_from_migration')
@mock.patch('nova.objects.instance.Instance.get_by_uuid')
def test_use_most_recent(self, mock_get_instance, mock_update_usage):
instance = self._fake_instance_obj()
mock_get_instance.return_value = instance
migration_2002 = objects.Migration(
id=2002,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2002, 1, 1, 0, 0, 0),
)
migration_2003 = objects.Migration(
id=2003,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2003, 1, 1, 0, 0, 0),
)
migration_2001 = objects.Migration(
id=2001,
context=self.context,
instance_uuid=instance.uuid,
updated_at=datetime.datetime(2001, 1, 1, 0, 0, 0),
)
self.tracker._update_usage_from_migrations(
self.context, [migration_2002, migration_2003, migration_2001])
mock_get_instance.assert_called_once_with(self.context, instance.uuid)
mock_update_usage.assert_called_once_with(
self.context, instance, None, migration_2003)
|
NeCTAR-RC/nova
|
nova/tests/unit/compute/test_resource_tracker.py
|
Python
|
apache-2.0
| 59,930
|
[
"exciting"
] |
3bca63903929ffa36bcfd9e00a7cb0fa3ab6bb7e694110256ae6abc8ec44680b
|
""" Priority corrector for the group and in-group shares
"""
__RCSID__ = "$Id$"
from DIRAC.Core.Utilities import ObjectLoader
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
from DIRAC.WorkloadManagementSystem.private.correctors.BaseCorrector import BaseCorrector
from DIRAC import gLogger, S_OK, S_ERROR
class SharesCorrector( object ):
def __init__( self, opsHelper ):
if not opsHelper:
opsHelper = Operations()
self.__opsHelper = opsHelper
self.__log = gLogger.getSubLogger( "SharesCorrector" )
self.__shareCorrectors = {}
self.__correctorsOrder = []
self.__baseCS = "JobScheduling/ShareCorrections"
self.__objLoader = ObjectLoader.ObjectLoader()
def __getCSValue( self, path, defaultValue = '' ):
return self.__opsHelper.getValue( "%s/%s" % ( self.__baseCS, path), defaultValue )
def __getCorrectorClass( self, correctorName ):
baseImport = "WorkloadManagementSystem.private.correctors"
fullCN = "%s.%sCorrector" % ( baseImport, correctorName )
result = self.__objLoader.getObjects( baseImport, ".*Corrector", parentClass = BaseCorrector )
if not result[ 'OK' ]:
return result
data = result[ 'Value' ]
if fullCN not in data:
return S_ERROR( "Can't find corrector %s" % fullCN )
return S_OK( data[ fullCN ] )
def instantiateRequiredCorrectors( self ):
correctorsToStart = self.__getCSValue( "ShareCorrectorsToStart", [] )
self.__correctorsOrder = correctorsToStart
self.__log.info( "Correctors requested: %s" % ", ".join( correctorsToStart ) )
# iterate over a snapshot of the keys, since correctors may be removed below
for corrector in list( self.__shareCorrectors ):
if corrector not in correctorsToStart:
self.__log.info( "Stopping corrector %s" % corrector )
del( self.__shareCorrectors[ corrector ] )
for corrector in correctorsToStart:
if corrector not in self.__shareCorrectors:
self.__log.info( "Starting corrector %s" % corrector )
result = self.__opsHelper.getSections( "%s/%s" % ( self.__baseCS, corrector ) )
if not result[ 'OK' ]:
self.__log.error( "Cannot get list of correctors to instantiate",
" for corrector type %s: %s" % ( corrector, result[ 'Message' ] ) )
continue
groupCorrectors = result[ 'Value' ]
self.__shareCorrectors[ corrector ] = {}
result = self.__getCorrectorClass( corrector )
if not result[ 'OK' ]:
self.__log.error( "Cannot instantiate corrector", "%s %s" % ( corrector, result[ 'Message' ] ) )
continue
correctorClass = result[ 'Value' ]
for groupCor in groupCorrectors:
groupPath = "%s/%s/Group" % ( corrector, groupCor )
groupToCorrect = self.__getCSValue( groupPath, "" )
if groupToCorrect:
groupKey = "gr:%s" % groupToCorrect
else:
groupKey = "global"
self.__log.info( "Instantiating group corrector %s (%s) of type %s" % ( groupCor,
groupToCorrect,
corrector ) )
if groupKey in self.__shareCorrectors[ corrector ]:
self.__log.error( "There are two group correctors defined",
" for %s type (group %s)" % ( corrector, groupToCorrect ) )
else:
groupCorPath = "%s/%s/%s" % ( self.__baseCS, corrector, groupCor )
correctorObj = correctorClass( self.__opsHelper, groupCorPath, groupToCorrect )
result = correctorObj.initialize()
if not result[ 'OK' ]:
self.__log.error( "Could not initialize corrector %s for %s: %s" % ( corrector, groupKey, result[ 'Message' ] ) )
else:
self.__shareCorrectors[ corrector ][ groupKey ] = correctorObj
return S_OK()
def updateCorrectorsKnowledge( self ):
for corrector in self.__shareCorrectors:
for groupTC in self.__shareCorrectors[ corrector ]:
self.__shareCorrectors[ corrector ][ groupTC ].updateHistoryKnowledge()
def update( self ):
self.instantiateRequiredCorrectors()
self.updateCorrectorsKnowledge()
def correctShares( self, shareDict, group = '' ):
if group:
groupKey = "gr:%s" % group
else:
groupKey = "global"
for corrector in self.__shareCorrectors:
if groupKey in self.__shareCorrectors[ corrector ]:
shareDict = self.__shareCorrectors[ corrector ][ groupKey ].applyCorrection( shareDict )
return shareDict
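# Illustrative usage sketch (not part of the original module). It assumes the
# JobScheduling/ShareCorrections section of the Operations CS is populated;
# the share dictionary and group name below are made-up example values.
def _exampleCorrectShares():
  sharesCorrector = SharesCorrector( Operations() )
  # load the configured correctors and refresh their accounting knowledge
  sharesCorrector.update()
  rawShares = { 'userA': 70.0, 'userB': 30.0 }
  return sharesCorrector.correctShares( rawShares, group = 'some_group' )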
|
andresailer/DIRAC
|
WorkloadManagementSystem/private/SharesCorrector.py
|
Python
|
gpl-3.0
| 4,590
|
[
"DIRAC"
] |
9337ee987db7c17a477f12f3c3b1ed4b5bb34fbf73ab62b40bc370e98f723c86
|
"""
Generate samples of synthetic data sets.
"""
# Authors: B. Thirion, G. Varoquaux, A. Gramfort, V. Michel, O. Grisel,
# G. Louppe, J. Nothman
# License: BSD 3 clause
import numbers
import array
import numpy as np
from scipy import linalg
import scipy.sparse as sp
from ..preprocessing import MultiLabelBinarizer
from ..utils import check_array, check_random_state
from ..utils import shuffle as util_shuffle
from ..utils.fixes import astype
from ..utils.random import sample_without_replacement
from ..externals import six
map = six.moves.map
zip = six.moves.zip
def _generate_hypercube(samples, dimensions, rng):
"""Returns distinct binary samples of length dimensions
"""
if dimensions > 30:
return np.hstack([_generate_hypercube(samples, dimensions - 30, rng),
_generate_hypercube(samples, 30, rng)])
out = astype(sample_without_replacement(2 ** dimensions, samples,
random_state=rng),
dtype='>u4', copy=False)
out = np.unpackbits(out.view('>u1')).reshape((-1, 32))[:, -dimensions:]
return out
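# Illustrative sketch (not part of the original module): _generate_hypercube
# returns one distinct binary row per requested sample, i.e. distinct
# hypercube vertices in `dimensions` dimensions. The helper name is made up.
def _example_generate_hypercube():
    rng = np.random.RandomState(0)
    corners = _generate_hypercube(4, 3, rng)
    assert corners.shape == (4, 3)
    assert len(set(map(tuple, corners.tolist()))) == 4  # rows are distinct
    return corners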
def make_classification(n_samples=100, n_features=20, n_informative=2,
n_redundant=2, n_repeated=0, n_classes=2,
n_clusters_per_class=2, weights=None, flip_y=0.01,
class_sep=1.0, hypercube=True, shift=0.0, scale=1.0,
shuffle=True, random_state=None):
"""Generate a random n-class classification problem.
This initially creates clusters of points normally distributed (std=1)
about vertices of a `2 * class_sep`-sided hypercube, and assigns an equal
number of clusters to each class. It introduces interdependence between
these features and adds various types of further noise to the data.
Prior to shuffling, `X` stacks a number of these primary "informative"
features, "redundant" linear combinations of these, "repeated" duplicates
of sampled features, and arbitrary noise for any remaining features.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features. These comprise `n_informative`
informative features, `n_redundant` redundant features, `n_repeated`
duplicated features and `n_features-n_informative-n_redundant-
n_repeated` useless features drawn at random.
n_informative : int, optional (default=2)
The number of informative features. Each class is composed of a number
of gaussian clusters each located around the vertices of a hypercube
in a subspace of dimension `n_informative`. For each cluster,
informative features are drawn independently from N(0, 1) and then
randomly linearly combined within each cluster in order to add
covariance. The clusters are then placed on the vertices of the
hypercube.
n_redundant : int, optional (default=2)
The number of redundant features. These features are generated as
random linear combinations of the informative features.
n_repeated : int, optional (default=0)
The number of duplicated features, drawn randomly from the informative
and the redundant features.
n_classes : int, optional (default=2)
The number of classes (or labels) of the classification problem.
n_clusters_per_class : int, optional (default=2)
The number of clusters per class.
weights : list of floats or None (default=None)
The proportions of samples assigned to each class. If None, then
classes are balanced. Note that if `len(weights) == n_classes - 1`,
then the last class weight is automatically inferred.
More than `n_samples` samples may be returned if the sum of `weights`
exceeds 1.
flip_y : float, optional (default=0.01)
The fraction of samples whose class are randomly exchanged.
class_sep : float, optional (default=1.0)
The factor multiplying the hypercube dimension.
hypercube : boolean, optional (default=True)
If True, the clusters are put on the vertices of a hypercube. If
False, the clusters are put on the vertices of a random polytope.
shift : float, array of shape [n_features] or None, optional (default=0.0)
Shift features by the specified value. If None, then features
are shifted by a random value drawn in [-class_sep, class_sep].
scale : float, array of shape [n_features] or None, optional (default=1.0)
Multiply features by the specified value. If None, then features
are scaled by a random value drawn in [1, 100]. Note that scaling
happens after shifting.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for class membership of each sample.
Notes
-----
The algorithm is adapted from Guyon [1] and was designed to generate
the "Madelon" dataset.
References
----------
.. [1] I. Guyon, "Design of experiments for the NIPS 2003 variable
selection benchmark", 2003.
See also
--------
make_blobs: simplified variant
make_multilabel_classification: unrelated generator for multilabel tasks
"""
generator = check_random_state(random_state)
# Count features, clusters and samples
if n_informative + n_redundant + n_repeated > n_features:
raise ValueError("Number of informative, redundant and repeated "
"features must sum to less than the number of total"
" features")
if 2 ** n_informative < n_classes * n_clusters_per_class:
raise ValueError("n_classes * n_clusters_per_class must"
" be smaller or equal 2 ** n_informative")
if weights and len(weights) not in [n_classes, n_classes - 1]:
raise ValueError("Weights specified but incompatible with number "
"of classes.")
n_useless = n_features - n_informative - n_redundant - n_repeated
n_clusters = n_classes * n_clusters_per_class
if weights and len(weights) == (n_classes - 1):
weights.append(1.0 - sum(weights))
if weights is None:
weights = [1.0 / n_classes] * n_classes
weights[-1] = 1.0 - sum(weights[:-1])
# Distribute samples among clusters by weight
n_samples_per_cluster = []
for k in range(n_clusters):
n_samples_per_cluster.append(int(n_samples * weights[k % n_classes]
/ n_clusters_per_class))
for i in range(n_samples - sum(n_samples_per_cluster)):
n_samples_per_cluster[i % n_clusters] += 1
# Initialize X and y
X = np.zeros((n_samples, n_features))
y = np.zeros(n_samples, dtype=np.int)
# Build the polytope whose vertices become cluster centroids
centroids = _generate_hypercube(n_clusters, n_informative,
generator).astype(float)
centroids *= 2 * class_sep
centroids -= class_sep
if not hypercube:
centroids *= generator.rand(n_clusters, 1)
centroids *= generator.rand(1, n_informative)
# Initially draw informative features from the standard normal
X[:, :n_informative] = generator.randn(n_samples, n_informative)
# Create each cluster; a variant of make_blobs
stop = 0
for k, centroid in enumerate(centroids):
start, stop = stop, stop + n_samples_per_cluster[k]
y[start:stop] = k % n_classes # assign labels
X_k = X[start:stop, :n_informative] # slice a view of the cluster
A = 2 * generator.rand(n_informative, n_informative) - 1
X_k[...] = np.dot(X_k, A) # introduce random covariance
X_k += centroid # shift the cluster to a vertex
# Create redundant features
if n_redundant > 0:
B = 2 * generator.rand(n_informative, n_redundant) - 1
X[:, n_informative:n_informative + n_redundant] = \
np.dot(X[:, :n_informative], B)
# Repeat some features
if n_repeated > 0:
n = n_informative + n_redundant
indices = ((n - 1) * generator.rand(n_repeated) + 0.5).astype(np.intp)
X[:, n:n + n_repeated] = X[:, indices]
# Fill useless features
if n_useless > 0:
X[:, -n_useless:] = generator.randn(n_samples, n_useless)
# Randomly replace labels
if flip_y >= 0.0:
flip_mask = generator.rand(n_samples) < flip_y
y[flip_mask] = generator.randint(n_classes, size=flip_mask.sum())
# Randomly shift and scale
if shift is None:
shift = (2 * generator.rand(n_features) - 1) * class_sep
X += shift
if scale is None:
scale = 1 + 100 * generator.rand(n_features)
X *= scale
if shuffle:
# Randomly permute samples
X, y = util_shuffle(X, y, random_state=generator)
# Randomly permute features
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
return X, y
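# Illustrative usage sketch (not part of the original module): it exercises
# make_classification with the documented defaults and checks the shapes
# promised by the docstring above. The helper name is made up.
def _example_make_classification():
    X, y = make_classification(n_samples=200, n_features=20, n_informative=2,
                               n_redundant=2, n_classes=2, random_state=0)
    assert X.shape == (200, 20)
    assert y.shape == (200,)
    assert set(np.unique(y)) <= {0, 1}
    return X, y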
def make_multilabel_classification(n_samples=100, n_features=20, n_classes=5,
n_labels=2, length=50, allow_unlabeled=True,
sparse=False, return_indicator='dense',
return_distributions=False,
random_state=None):
"""Generate a random multilabel classification problem.
For each sample, the generative process is:
- pick the number of labels: n ~ Poisson(n_labels)
- n times, choose a class c: c ~ Multinomial(theta)
- pick the document length: k ~ Poisson(length)
- k times, choose a word: w ~ Multinomial(theta_c)
In the above process, rejection sampling is used to make sure that
n is never zero or more than `n_classes`, and that the document length
is never zero. Likewise, we reject classes which have already been chosen.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=20)
The total number of features.
n_classes : int, optional (default=5)
The number of classes of the classification problem.
n_labels : int, optional (default=2)
The average number of labels per instance. More precisely, the number
of labels per sample is drawn from a Poisson distribution with
``n_labels`` as its expected value, but samples are bounded (using
rejection sampling) by ``n_classes``, and must be nonzero if
``allow_unlabeled`` is False.
length : int, optional (default=50)
The sum of the features (number of words if documents) is drawn from
a Poisson distribution with this expected value.
allow_unlabeled : bool, optional (default=True)
If ``True``, some instances might not belong to any class.
sparse : bool, optional (default=False)
If ``True``, return a sparse feature matrix
.. versionadded:: 0.17
parameter to allow *sparse* output.
return_indicator : 'dense' (default) | 'sparse' | False
If ``dense`` return ``Y`` in the dense binary indicator format. If
``'sparse'`` return ``Y`` in the sparse binary indicator format.
``False`` returns a list of lists of labels.
return_distributions : bool, optional (default=False)
If ``True``, return the prior class probability and conditional
probabilities of features given classes, from which the data was
drawn.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
Y : array or sparse CSR matrix of shape [n_samples, n_classes]
The label sets.
p_c : array, shape [n_classes]
The probability of each class being drawn. Only returned if
``return_distributions=True``.
p_w_c : array, shape [n_features, n_classes]
The probability of each feature being drawn given each class.
Only returned if ``return_distributions=True``.
"""
generator = check_random_state(random_state)
p_c = generator.rand(n_classes)
p_c /= p_c.sum()
cumulative_p_c = np.cumsum(p_c)
p_w_c = generator.rand(n_features, n_classes)
p_w_c /= np.sum(p_w_c, axis=0)
def sample_example():
_, n_classes = p_w_c.shape
# pick a nonzero number of labels per document by rejection sampling
y_size = n_classes + 1
while (not allow_unlabeled and y_size == 0) or y_size > n_classes:
y_size = generator.poisson(n_labels)
# pick n classes
y = set()
while len(y) != y_size:
# pick a class with probability P(c)
c = np.searchsorted(cumulative_p_c,
generator.rand(y_size - len(y)))
y.update(c)
y = list(y)
# pick a non-zero document length by rejection sampling
n_words = 0
while n_words == 0:
n_words = generator.poisson(length)
# generate a document of length n_words
if len(y) == 0:
# if sample does not belong to any class, generate noise word
words = generator.randint(n_features, size=n_words)
return words, y
# sample words with replacement from selected classes
cumulative_p_w_sample = p_w_c.take(y, axis=1).sum(axis=1).cumsum()
cumulative_p_w_sample /= cumulative_p_w_sample[-1]
words = np.searchsorted(cumulative_p_w_sample, generator.rand(n_words))
return words, y
X_indices = array.array('i')
X_indptr = array.array('i', [0])
Y = []
for i in range(n_samples):
words, y = sample_example()
X_indices.extend(words)
X_indptr.append(len(X_indices))
Y.append(y)
X_data = np.ones(len(X_indices), dtype=np.float64)
X = sp.csr_matrix((X_data, X_indices, X_indptr),
shape=(n_samples, n_features))
X.sum_duplicates()
if not sparse:
X = X.toarray()
# return_indicator can be True due to backward compatibility
if return_indicator in (True, 'sparse', 'dense'):
lb = MultiLabelBinarizer(sparse_output=(return_indicator == 'sparse'))
Y = lb.fit([range(n_classes)]).transform(Y)
elif return_indicator is not False:
raise ValueError("return_indicator must be either 'sparse', 'dense' "
'or False.')
if return_distributions:
return X, Y, p_c, p_w_c
return X, Y
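# Illustrative usage sketch (not part of the original module): with the
# default return_indicator='dense', Y is a binary indicator matrix of shape
# (n_samples, n_classes), as described in the docstring above.
def _example_make_multilabel_classification():
    X, Y = make_multilabel_classification(n_samples=50, n_features=10,
                                          n_classes=4, n_labels=2,
                                          random_state=0)
    assert X.shape == (50, 10)
    assert Y.shape == (50, 4)
    assert set(np.unique(Y)) <= {0, 1}
    return X, Y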
def make_hastie_10_2(n_samples=12000, random_state=None):
"""Generates data for binary classification used in
Hastie et al. 2009, Example 10.2.
The ten features are standard independent Gaussian and
the target ``y`` is defined by::
y[i] = 1 if np.sum(X[i] ** 2) > 9.34 else -1
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=12000)
The number of samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 10]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
See also
--------
make_gaussian_quantiles: a generalization of this dataset approach
"""
rs = check_random_state(random_state)
shape = (n_samples, 10)
X = rs.normal(size=shape).reshape(shape)
y = ((X ** 2.0).sum(axis=1) > 9.34).astype(np.float64)
y[y == 0.0] = -1.0
return X, y
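# Minimal usage sketch (not part of the module): the labels are +/-1 and are
# determined by the squared-norm threshold from the docstring. The helper
# name _demo_hastie_10_2 is hypothetical; np is the module's numpy import.
def _demo_hastie_10_2():
    X, y = make_hastie_10_2(n_samples=1000, random_state=0)
    assert set(np.unique(y)) == {-1.0, 1.0}
    # the sign of y follows the 9.34 threshold on the squared norm exactly
    assert np.all((np.sum(X ** 2, axis=1) > 9.34) == (y == 1.0))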
def make_regression(n_samples=100, n_features=100, n_informative=10,
n_targets=1, bias=0.0, effective_rank=None,
tail_strength=0.5, noise=0.0, shuffle=True, coef=False,
random_state=None):
"""Generate a random regression problem.
The input set can either be well conditioned (by default) or have a low
rank-fat tail singular profile. See :func:`make_low_rank_matrix` for
more details.
The output is generated by applying a (potentially biased) random linear
regression model with `n_informative` nonzero regressors to the previously
generated input and some gaussian centered noise with some adjustable
scale.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
n_informative : int, optional (default=10)
The number of informative features, i.e., the number of features used
to build the linear model used to generate the output.
n_targets : int, optional (default=1)
The number of regression targets, i.e., the dimension of the y output
vector associated with a sample. By default, the output is a scalar.
bias : float, optional (default=0.0)
The bias term in the underlying linear model.
effective_rank : int or None, optional (default=None)
if not None:
The approximate number of singular vectors required to explain most
of the input data by linear combinations. Using this kind of
singular spectrum in the input allows the generator to reproduce
the correlations often observed in practice.
if None:
The input set is well conditioned, centered and gaussian with
unit variance.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile if `effective_rank` is not None.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
shuffle : boolean, optional (default=True)
Shuffle the samples and the features.
coef : boolean, optional (default=False)
If True, the coefficients of the underlying linear model are returned.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples] or [n_samples, n_targets]
The output values.
coef : array of shape [n_features] or [n_features, n_targets], optional
The coefficient of the underlying linear model. It is returned only if
coef is True.
"""
n_informative = min(n_features, n_informative)
generator = check_random_state(random_state)
if effective_rank is None:
# Randomly generate a well conditioned input set
X = generator.randn(n_samples, n_features)
else:
# Randomly generate a low rank, fat tail input set
X = make_low_rank_matrix(n_samples=n_samples,
n_features=n_features,
effective_rank=effective_rank,
tail_strength=tail_strength,
random_state=generator)
# Generate a ground truth model with only n_informative features being non
# zeros (the other features are not correlated to y and should be ignored
# by a sparsifying regularizers such as L1 or elastic net)
ground_truth = np.zeros((n_features, n_targets))
ground_truth[:n_informative, :] = 100 * generator.rand(n_informative,
n_targets)
y = np.dot(X, ground_truth) + bias
# Add noise
if noise > 0.0:
y += generator.normal(scale=noise, size=y.shape)
# Randomly permute samples and features
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
indices = np.arange(n_features)
generator.shuffle(indices)
X[:, :] = X[:, indices]
ground_truth = ground_truth[indices]
y = np.squeeze(y)
if coef:
return X, y, np.squeeze(ground_truth)
else:
return X, y
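# Minimal usage sketch (hypothetical helper, not part of the module): with
# noise=0.0 and coef=True the targets are an exact linear function of X, and
# only n_informative coefficients are non-zero.
def _demo_make_regression():
    X, y, w = make_regression(n_samples=50, n_features=20, n_informative=5,
                              noise=0.0, coef=True, random_state=0)
    assert np.sum(w != 0) == 5
    assert np.allclose(y, np.dot(X, w))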
def make_circles(n_samples=100, shuffle=True, noise=None, random_state=None,
factor=.8):
"""Make a large circle containing a smaller circle in 2d.
A simple toy dataset to visualize clustering and classification
algorithms.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
factor : double < 1 (default=.8)
Scale factor between inner and outer circle.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
if factor > 1 or factor < 0:
raise ValueError("'factor' has to be between 0 and 1.")
generator = check_random_state(random_state)
# so as not to have the first point = last point, we add one and then
# remove it.
linspace = np.linspace(0, 2 * np.pi, n_samples // 2 + 1)[:-1]
outer_circ_x = np.cos(linspace)
outer_circ_y = np.sin(linspace)
inner_circ_x = outer_circ_x * factor
inner_circ_y = outer_circ_y * factor
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples // 2, dtype=np.intp),
np.ones(n_samples // 2, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
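# Minimal usage sketch (hypothetical helper): without noise the two classes
# lie exactly on circles of radius 1 and `factor`.
def _demo_make_circles():
    X, y = make_circles(n_samples=100, factor=0.5, noise=None, random_state=0)
    radii = np.sqrt(np.sum(X ** 2, axis=1))
    assert np.allclose(radii[y == 0], 1.0)  # outer circle
    assert np.allclose(radii[y == 1], 0.5)  # inner circle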
def make_moons(n_samples=100, shuffle=True, noise=None, random_state=None):
"""Make two interleaving half circles
A simple toy dataset to visualize clustering and classification
algorithms. Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points generated.
shuffle : bool, optional (default=True)
Whether to shuffle the samples.
noise : double or None (default=None)
Standard deviation of Gaussian noise added to the data.
Returns
-------
X : array of shape [n_samples, 2]
The generated samples.
y : array of shape [n_samples]
The integer labels (0 or 1) for class membership of each sample.
"""
n_samples_out = n_samples // 2
n_samples_in = n_samples - n_samples_out
generator = check_random_state(random_state)
outer_circ_x = np.cos(np.linspace(0, np.pi, n_samples_out))
outer_circ_y = np.sin(np.linspace(0, np.pi, n_samples_out))
inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - .5
X = np.vstack((np.append(outer_circ_x, inner_circ_x),
np.append(outer_circ_y, inner_circ_y))).T
y = np.hstack([np.zeros(n_samples_out, dtype=np.intp),
np.ones(n_samples_in, dtype=np.intp)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
if noise is not None:
X += generator.normal(scale=noise, size=X.shape)
return X, y
def make_blobs(n_samples=100, n_features=2, centers=3, cluster_std=1.0,
center_box=(-10.0, 10.0), shuffle=True, random_state=None):
"""Generate isotropic Gaussian blobs for clustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The total number of points equally divided among clusters.
n_features : int, optional (default=2)
The number of features for each sample.
centers : int or array of shape [n_centers, n_features], optional
(default=3)
The number of centers to generate, or the fixed center locations.
cluster_std : float or sequence of floats, optional (default=1.0)
The standard deviation of the clusters.
center_box : pair of floats (min, max), optional (default=(-10.0, 10.0))
The bounding box for each cluster center when centers are
generated at random.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for cluster membership of each sample.
Examples
--------
>>> from sklearn.datasets.samples_generator import make_blobs
>>> X, y = make_blobs(n_samples=10, centers=3, n_features=2,
... random_state=0)
>>> print(X.shape)
(10, 2)
>>> y
array([0, 0, 1, 0, 2, 2, 2, 1, 1, 0])
See also
--------
make_classification: a more intricate variant
"""
generator = check_random_state(random_state)
if isinstance(centers, numbers.Integral):
centers = generator.uniform(center_box[0], center_box[1],
size=(centers, n_features))
else:
centers = check_array(centers)
n_features = centers.shape[1]
if isinstance(cluster_std, numbers.Real):
cluster_std = np.ones(len(centers)) * cluster_std
X = []
y = []
n_centers = centers.shape[0]
n_samples_per_center = [int(n_samples // n_centers)] * n_centers
for i in range(n_samples % n_centers):
n_samples_per_center[i] += 1
for i, (n, std) in enumerate(zip(n_samples_per_center, cluster_std)):
X.append(centers[i] + generator.normal(scale=std,
size=(n, n_features)))
y += [i] * n
X = np.concatenate(X)
y = np.array(y)
if shuffle:
indices = np.arange(n_samples)
generator.shuffle(indices)
X = X[indices]
y = y[indices]
return X, y
def make_friedman1(n_samples=100, n_features=10, noise=0.0, random_state=None):
"""Generate the "Friedman \#1" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are independent features uniformly distributed on the interval
[0, 1]. The output `y` is created according to the formula::
y(X) = 10 * sin(pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * N(0, 1).
Out of the `n_features` features, only 5 are actually used to compute
`y`. The remaining features are independent of `y`.
The number of features has to be >= 5.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features. Should be at least 5.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
if n_features < 5:
raise ValueError("n_features must be at least five.")
generator = check_random_state(random_state)
X = generator.rand(n_samples, n_features)
y = 10 * np.sin(np.pi * X[:, 0] * X[:, 1]) + 20 * (X[:, 2] - 0.5) ** 2 \
+ 10 * X[:, 3] + 5 * X[:, 4] + noise * generator.randn(n_samples)
return X, y
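# Minimal usage sketch (hypothetical helper): with noise=0.0 the target can
# be recomputed from the first five columns alone, exactly as the docstring
# formula states; the remaining features do not enter y.
def _demo_make_friedman1():
    X, y = make_friedman1(n_samples=200, n_features=10, noise=0.0,
                          random_state=0)
    y_hat = (10 * np.sin(np.pi * X[:, 0] * X[:, 1])
             + 20 * (X[:, 2] - 0.5) ** 2 + 10 * X[:, 3] + 5 * X[:, 4])
    assert np.allclose(y, y_hat)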
def make_friedman2(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#2" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = (X[:, 0] ** 2 + (X[:, 1] * X[:, 2] \
- 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = (X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) ** 2) ** 0.5 \
+ noise * generator.randn(n_samples)
return X, y
def make_friedman3(n_samples=100, noise=0.0, random_state=None):
"""Generate the "Friedman \#3" regression problem
This dataset is described in Friedman [1] and Breiman [2].
Inputs `X` are 4 independent features uniformly distributed on the
intervals::
0 <= X[:, 0] <= 100,
40 * pi <= X[:, 1] <= 560 * pi,
0 <= X[:, 2] <= 1,
1 <= X[:, 3] <= 11.
The output `y` is created according to the formula::
y(X) = arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) \
/ X[:, 0]) + noise * N(0, 1).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise applied to the output.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 4]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] J. Friedman, "Multivariate adaptive regression splines", The Annals
of Statistics 19 (1), pages 1-67, 1991.
.. [2] L. Breiman, "Bagging predictors", Machine Learning 24,
pages 123-140, 1996.
"""
generator = check_random_state(random_state)
X = generator.rand(n_samples, 4)
X[:, 0] *= 100
X[:, 1] *= 520 * np.pi
X[:, 1] += 40 * np.pi
X[:, 3] *= 10
X[:, 3] += 1
y = np.arctan((X[:, 1] * X[:, 2] - 1 / (X[:, 1] * X[:, 3])) / X[:, 0]) \
+ noise * generator.randn(n_samples)
return X, y
def make_low_rank_matrix(n_samples=100, n_features=100, effective_rank=10,
tail_strength=0.5, random_state=None):
"""Generate a mostly low rank matrix with bell-shaped singular values
Most of the variance can be explained by a bell-shaped curve of width
effective_rank: the low rank part of the singular values profile is::
(1 - tail_strength) * exp(-1.0 * (i / effective_rank) ** 2)
The remaining singular values' tail is fat, decreasing as::
tail_strength * exp(-0.1 * i / effective_rank).
The low rank part of the profile can be considered the structured
signal part of the data while the tail can be considered the noisy
part of the data that cannot be summarized by a low number of linear
components (singular vectors).
This kind of singular profile is often seen in practice, for instance:
- gray level pictures of faces
- TF-IDF vectors of text documents crawled from the web
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=100)
The number of features.
effective_rank : int, optional (default=10)
The approximate number of singular vectors required to explain most of
the data by linear combinations.
tail_strength : float between 0.0 and 1.0, optional (default=0.5)
The relative importance of the fat noisy tail of the singular values
profile.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The matrix.
"""
generator = check_random_state(random_state)
n = min(n_samples, n_features)
# Random (ortho normal) vectors
u, _ = linalg.qr(generator.randn(n_samples, n), mode='economic')
v, _ = linalg.qr(generator.randn(n_features, n), mode='economic')
# Index of the singular values
singular_ind = np.arange(n, dtype=np.float64)
# Build the singular profile by assembling signal and noise components
low_rank = ((1 - tail_strength) *
np.exp(-1.0 * (singular_ind / effective_rank) ** 2))
tail = tail_strength * np.exp(-0.1 * singular_ind / effective_rank)
s = np.identity(n) * (low_rank + tail)
return np.dot(np.dot(u, s), v.T)
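# Minimal usage sketch (hypothetical helper): the singular spectrum of the
# generated matrix follows the bell-plus-tail profile described above, so it
# decays monotonically from the leading values into the fat tail.
def _demo_make_low_rank_matrix():
    X = make_low_rank_matrix(n_samples=100, n_features=50, effective_rank=5,
                             tail_strength=0.1, random_state=0)
    s = np.linalg.svd(X, compute_uv=False)  # sorted in decreasing order
    assert s[0] > s[10] > s[-1]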
def make_sparse_coded_signal(n_samples, n_components, n_features,
n_nonzero_coefs, random_state=None):
"""Generate a signal as a sparse combination of dictionary elements.
Returns a matrix Y = DX, such as D is (n_features, n_components),
X is (n_components, n_samples) and each column of X has exactly
n_nonzero_coefs non-zero elements.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int
number of samples to generate
n_components : int,
number of components in the dictionary
n_features : int
number of features of the dataset to generate
n_nonzero_coefs : int
number of active (non-zero) coefficients in each sample
random_state : int or RandomState instance, optional (default=None)
seed used by the pseudo random number generator
Returns
-------
data : array of shape [n_features, n_samples]
The encoded signal (Y).
dictionary : array of shape [n_features, n_components]
The dictionary with normalized components (D).
code : array of shape [n_components, n_samples]
The sparse code such that each column of this matrix has exactly
n_nonzero_coefs non-zero items (X).
"""
generator = check_random_state(random_state)
# generate dictionary
D = generator.randn(n_features, n_components)
D /= np.sqrt(np.sum((D ** 2), axis=0))
# generate code
X = np.zeros((n_components, n_samples))
for i in range(n_samples):
idx = np.arange(n_components)
generator.shuffle(idx)
idx = idx[:n_nonzero_coefs]
X[idx, i] = generator.randn(n_nonzero_coefs)
# encode signal
Y = np.dot(D, X)
return map(np.squeeze, (Y, D, X))
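# Minimal usage sketch (hypothetical helper): Y factors exactly as D.dot(X)
# and every code column carries exactly n_nonzero_coefs active entries.
def _demo_make_sparse_coded_signal():
    Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
                                       n_features=10, n_nonzero_coefs=3,
                                       random_state=0)
    assert np.allclose(Y, np.dot(D, X))
    assert np.all((X != 0).sum(axis=0) == 3)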
def make_sparse_uncorrelated(n_samples=100, n_features=10, random_state=None):
"""Generate a random regression problem with sparse uncorrelated design
This dataset is described in Celeux et al. [1] as::
X ~ N(0, 1)
y(X) = X[:, 0] + 2 * X[:, 1] - 2 * X[:, 2] - 1.5 * X[:, 3]
Only the first 4 features are informative. The remaining features are
useless.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of samples.
n_features : int, optional (default=10)
The number of features.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The input samples.
y : array of shape [n_samples]
The output values.
References
----------
.. [1] G. Celeux, M. El Anbari, J.-M. Marin, C. P. Robert,
"Regularization in regression: comparing Bayesian and frequentist
methods in a poorly informative situation", 2009.
"""
generator = check_random_state(random_state)
X = generator.normal(loc=0, scale=1, size=(n_samples, n_features))
y = generator.normal(loc=(X[:, 0] +
2 * X[:, 1] -
2 * X[:, 2] -
1.5 * X[:, 3]), scale=np.ones(n_samples))
return X, y
def make_spd_matrix(n_dim, random_state=None):
"""Generate a random symmetric, positive-definite matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_dim : int
The matrix dimension.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_dim, n_dim]
The random symmetric, positive-definite matrix.
See also
--------
make_sparse_spd_matrix
"""
generator = check_random_state(random_state)
A = generator.rand(n_dim, n_dim)
U, s, V = linalg.svd(np.dot(A.T, A))
X = np.dot(np.dot(U, 1.0 + np.diag(generator.rand(n_dim))), V)
return X
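# Minimal usage sketch (hypothetical helper): the result is symmetric and its
# eigenvalues are strictly positive, which is what "SPD" promises.
def _demo_make_spd_matrix():
    A = make_spd_matrix(n_dim=4, random_state=0)
    assert np.allclose(A, A.T)
    assert np.all(np.linalg.eigvalsh(A) > 0)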
def make_sparse_spd_matrix(dim=1, alpha=0.95, norm_diag=False,
smallest_coef=.1, largest_coef=.9,
random_state=None):
"""Generate a sparse symmetric definite positive matrix.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
dim : integer, optional (default=1)
The size of the random matrix to generate.
alpha : float between 0 and 1, optional (default=0.95)
The probability that a coefficient is zero (see notes). Larger values
enforce more sparsity.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
largest_coef : float between 0 and 1, optional (default=0.9)
The value of the largest coefficient.
smallest_coef : float between 0 and 1, optional (default=0.1)
The value of the smallest coefficient.
norm_diag : boolean, optional (default=False)
Whether to normalize the output matrix to make the leading diagonal
elements all 1.
Returns
-------
prec : sparse matrix of shape (dim, dim)
The generated matrix.
Notes
-----
The sparsity is actually imposed on the cholesky factor of the matrix.
Thus alpha does not translate directly into the filling fraction of
the matrix itself.
See also
--------
make_spd_matrix
"""
random_state = check_random_state(random_state)
chol = -np.eye(dim)
aux = random_state.rand(dim, dim)
aux[aux < alpha] = 0
aux[aux > alpha] = (smallest_coef
+ (largest_coef - smallest_coef)
* random_state.rand(np.sum(aux > alpha)))
aux = np.tril(aux, k=-1)
# Permute the lines: we don't want to have asymmetries in the final
# SPD matrix
permutation = random_state.permutation(dim)
aux = aux[permutation].T[permutation]
chol += aux
prec = np.dot(chol.T, chol)
if norm_diag:
# Form the diagonal vector into a row matrix
d = np.diag(prec).reshape(1, prec.shape[0])
d = 1. / np.sqrt(d)
prec *= d
prec *= d.T
return prec
def make_swiss_roll(n_samples=100, noise=0.0, random_state=None):
"""Generate a swiss roll dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
Notes
-----
The algorithm is from Marsland [1].
References
----------
.. [1] S. Marsland, "Machine Learning: An Algorithmic Perspective",
Chapter 10, 2009.
http://seat.massey.ac.nz/personal/s.r.marsland/Code/10/lle.py
"""
generator = check_random_state(random_state)
t = 1.5 * np.pi * (1 + 2 * generator.rand(1, n_samples))
x = t * np.cos(t)
y = 21 * generator.rand(1, n_samples)
z = t * np.sin(t)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
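# Minimal usage sketch (hypothetical helper): with noise=0.0 the first and
# third coordinates are exactly t*cos(t) and t*sin(t), so `t` really is the
# univariate position along the roll.
def _demo_make_swiss_roll():
    X, t = make_swiss_roll(n_samples=300, noise=0.0, random_state=0)
    assert X.shape == (300, 3) and t.shape == (300,)
    assert np.allclose(X[:, 0], t * np.cos(t))
    assert np.allclose(X[:, 2], t * np.sin(t))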
def make_s_curve(n_samples=100, noise=0.0, random_state=None):
"""Generate an S curve dataset.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
n_samples : int, optional (default=100)
The number of sample points on the S curve.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, 3]
The points.
t : array of shape [n_samples]
The univariate position of the sample according to the main dimension
of the points in the manifold.
"""
generator = check_random_state(random_state)
t = 3 * np.pi * (generator.rand(1, n_samples) - 0.5)
x = np.sin(t)
y = 2.0 * generator.rand(1, n_samples)
z = np.sign(t) * (np.cos(t) - 1)
X = np.concatenate((x, y, z))
X += noise * generator.randn(3, n_samples)
X = X.T
t = np.squeeze(t)
return X, t
def make_gaussian_quantiles(mean=None, cov=1., n_samples=100,
n_features=2, n_classes=3,
shuffle=True, random_state=None):
"""Generate isotropic Gaussian and label samples by quantile
This classification dataset is constructed by taking a multi-dimensional
standard normal distribution and defining classes separated by nested
concentric multi-dimensional spheres such that roughly equal numbers of
samples are in each class (quantiles of the :math:`\chi^2` distribution).
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
mean : array of shape [n_features], optional (default=None)
The mean of the multi-dimensional normal distribution.
If None then use the origin (0, 0, ...).
cov : float, optional (default=1.)
The covariance matrix will be this value times the unit matrix. This
dataset only produces symmetric normal distributions.
n_samples : int, optional (default=100)
The total number of points equally divided among classes.
n_features : int, optional (default=2)
The number of features for each sample.
n_classes : int, optional (default=3)
The number of classes
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape [n_samples, n_features]
The generated samples.
y : array of shape [n_samples]
The integer labels for quantile membership of each sample.
Notes
-----
The dataset is from Zhu et al [1].
References
----------
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
if n_samples < n_classes:
raise ValueError("n_samples must be at least n_classes")
generator = check_random_state(random_state)
if mean is None:
mean = np.zeros(n_features)
else:
mean = np.array(mean)
# Build multivariate normal distribution
X = generator.multivariate_normal(mean, cov * np.identity(n_features),
(n_samples,))
# Sort by distance from origin
idx = np.argsort(np.sum((X - mean[np.newaxis, :]) ** 2, axis=1))
X = X[idx, :]
# Label by quantile
step = n_samples // n_classes
y = np.hstack([np.repeat(np.arange(n_classes), step),
np.repeat(n_classes - 1, n_samples - step * n_classes)])
if shuffle:
X, y = util_shuffle(X, y, random_state=generator)
return X, y
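# Minimal usage sketch (hypothetical helper): classes are quantiles of the
# distance from the mean, so class counts come out equal when n_samples is a
# multiple of n_classes.
def _demo_make_gaussian_quantiles():
    X, y = make_gaussian_quantiles(n_samples=90, n_features=2, n_classes=3,
                                   random_state=0)
    assert X.shape == (90, 2)
    assert np.all(np.bincount(y) == 30)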
def _shuffle(data, random_state=None):
generator = check_random_state(random_state)
n_rows, n_cols = data.shape
row_idx = generator.permutation(n_rows)
col_idx = generator.permutation(n_cols)
result = data[row_idx][:, col_idx]
return result, row_idx, col_idx
def make_biclusters(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with constant block diagonal structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer
The number of biclusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Dhillon, I. S. (2001, August). Co-clustering documents and
words using bipartite spectral graph partitioning. In Proceedings
of the seventh ACM SIGKDD international conference on Knowledge
discovery and data mining (pp. 269-274). ACM.
See also
--------
make_checkerboard
"""
generator = check_random_state(random_state)
n_rows, n_cols = shape
consts = generator.uniform(minval, maxval, n_clusters)
# row and column clusters of approximately equal sizes
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_clusters,
n_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_clusters,
n_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_clusters):
selector = np.outer(row_labels == i, col_labels == i)
result[selector] += consts[i]
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == c for c in range(n_clusters))
cols = np.vstack(col_labels == c for c in range(n_clusters))
return result, rows, cols
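# Minimal usage sketch (hypothetical helper): each row and each column of the
# generated array belongs to exactly one bicluster, so the boolean indicator
# matrices sum to one along the cluster axis.
def _demo_make_biclusters():
    data, rows, cols = make_biclusters(shape=(30, 20), n_clusters=3,
                                       shuffle=False, random_state=0)
    assert data.shape == (30, 20)
    assert np.all(rows.sum(axis=0) == 1) and np.all(cols.sum(axis=0) == 1)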
def make_checkerboard(shape, n_clusters, noise=0.0, minval=10,
maxval=100, shuffle=True, random_state=None):
"""Generate an array with block checkerboard structure for
biclustering.
Read more in the :ref:`User Guide <sample_generators>`.
Parameters
----------
shape : iterable (n_rows, n_cols)
The shape of the result.
n_clusters : integer or iterable (n_row_clusters, n_column_clusters)
The number of row and column clusters.
noise : float, optional (default=0.0)
The standard deviation of the gaussian noise.
minval : int, optional (default=10)
Minimum value of a bicluster.
maxval : int, optional (default=100)
Maximum value of a bicluster.
shuffle : boolean, optional (default=True)
Shuffle the samples.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
X : array of shape `shape`
The generated array.
rows : array of shape (n_clusters, X.shape[0],)
The indicators for cluster membership of each row.
cols : array of shape (n_clusters, X.shape[1],)
The indicators for cluster membership of each column.
References
----------
.. [1] Kluger, Y., Basri, R., Chang, J. T., & Gerstein, M. (2003).
Spectral biclustering of microarray data: coclustering genes
and conditions. Genome research, 13(4), 703-716.
See also
--------
make_biclusters
"""
generator = check_random_state(random_state)
if hasattr(n_clusters, "__len__"):
n_row_clusters, n_col_clusters = n_clusters
else:
n_row_clusters = n_col_clusters = n_clusters
# row and column clusters of approximately equal sizes
n_rows, n_cols = shape
row_sizes = generator.multinomial(n_rows,
np.repeat(1.0 / n_row_clusters,
n_row_clusters))
col_sizes = generator.multinomial(n_cols,
np.repeat(1.0 / n_col_clusters,
n_col_clusters))
row_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_row_clusters), row_sizes)))
col_labels = np.hstack(list(np.repeat(val, rep) for val, rep in
zip(range(n_col_clusters), col_sizes)))
result = np.zeros(shape, dtype=np.float64)
for i in range(n_row_clusters):
for j in range(n_col_clusters):
selector = np.outer(row_labels == i, col_labels == j)
result[selector] += generator.uniform(minval, maxval)
if noise > 0:
result += generator.normal(scale=noise, size=result.shape)
if shuffle:
result, row_idx, col_idx = _shuffle(result, random_state)
row_labels = row_labels[row_idx]
col_labels = col_labels[col_idx]
rows = np.vstack(row_labels == label
for label in range(n_row_clusters)
for _ in range(n_col_clusters))
cols = np.vstack(col_labels == label
for _ in range(n_row_clusters)
for label in range(n_col_clusters))
return result, rows, cols
|
rishikksh20/scikit-learn
|
sklearn/datasets/samples_generator.py
|
Python
|
bsd-3-clause
| 56,558
|
[
"Gaussian"
] |
d92bb995f62e25cfec9b5ec3edee0b78359067787bcba92811090235bf784e3b
|
# Copyright 2013 anthony cantor
# This file is part of pyc.
#
# pyc is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyc is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyc. If not, see <http://www.gnu.org/licenses/>.
from pyc_astvisitor import ASTTxformer
from pyc_astvisitor import ASTVisitor
import pyc_vis
import pyc_parser
from pyc_log import *
from pyc_ir_nodes import *
import pyc_gen_name
from pyc_constants import BadAss
import pyc_lineage
import StringIO
import ast
class InvalidSyntax(Exception):
pass
class InvalidP1(InvalidSyntax):
pass
class InvalidP3(InvalidSyntax):
pass
class AstToIRTxformer(ASTTxformer):
def __init__(self):
ASTTxformer.__init__(self)
def visit_Assign(self, node):
if len(node.targets) != 1:
raise InvalidP1("assign expected to have only one target: %r" % node)
elif node.targets[0].__class__ not in set([ast.Name, ast.Subscript, ast.Attribute]):
raise BadAss("assumed all targets were names, subs or attrs: %r" % ast.dump(node))
elif not isinstance(node.targets[0].ctx, ast.Store):
raise BadAss("why isnt the target context store?: %r" % node)
return ast.Assign(
targets = [pyc_vis.visit(self, node.targets[0])],
value = pyc_vis.visit(self, node.value)
)
def visit_Num(self, node):
return InjectFromInt(
arg = ast.Num(n=node.n)
)
def visit_HasAttr(self, node):
return InjectFromBool(
arg = HasAttr(
obj = pyc_vis.visit(self, node.obj),
attr = pyc_vis.visit(self, node.attr)
)
)
def visit_Print(self, node):
if len(node.values) != 1:
raise InvalidP1("print expected to have only one arg")
return ast.Print(
dest = None,
values = [ pyc_vis.visit(self, node.values[0]) ],
nl = True
)
def gen_name(self):
return pyc_gen_name.new("ir_")
def visit_UnaryOp(self, node):
if isinstance(node.op, ast.Not):
return InjectFromBool(arg = ast.UnaryOp(
op = ast.Not(),
operand = let(
self.gen_name,
rhs = pyc_vis.visit(self, node.operand),
body = lambda name: make_is_true(name)
)
))
elif isinstance(node.op, ast.USub):
return self.visit_UnaryOp_USub(node)
else:
return self.default(node)
def visit_UnaryOp_USub(self, node):
class USubPolySwitch(PolySwitch):
def no_match(self, name_typ_list):
return make_error(
"cant negate %s " % (name_typ_list[0][1])
)
def make_usub(self, op):
return ast.UnaryOp(
op = ast.USub(),
operand = op
)
def int(self, op):
return InjectFromInt(
arg = self.make_usub(ProjectToInt(arg=op) )
)
def bool(self, op):
return InjectFromInt(
arg = self.make_usub(ProjectToBool(arg=op) )
)
#end USubPolySwitch
return let(
name_gen = self.gen_name,
rhs = pyc_vis.visit(self, node.operand),
body = lambda name: polyswitch(USubPolySwitch(), var_ref(name))
)
def visit_IfExp(self, node):
return ast.IfExp(
test = let(
name_gen = self.gen_name,
rhs = pyc_vis.visit(self, node.test),
body = lambda name: make_is_true(name)
),
body = pyc_vis.visit(self, node.body),
orelse = pyc_vis.visit(self, node.orelse)
)
def visit_If(self, node):
return ast.If(
test = let(
name_gen = self.gen_name,
rhs = pyc_vis.visit(self, node.test),
body = lambda name: make_is_true(name)
),
body = [pyc_vis.visit(self, x) for x in node.body],
orelse = [pyc_vis.visit(self, x) for x in node.orelse]
)
def visit_While(self, node):
if len(node.orelse) > 0:
raise InvalidP3("while orelse not supported: %s" % dump(node) )
return ast.While(
test = let(
name_gen = self.gen_name,
rhs = pyc_vis.visit(self, node.test),
body = lambda name: make_is_true(name)
),
body = [pyc_vis.visit(self, x) for x in node.body]
)
def visit_Compare(self, node):
if len(node.ops) != 1:
raise BadAss("expected 1 compare op: %s" % dump(node) )
elif not isinstance(node.ops[0], ast.Eq) \
and not isinstance(node.ops[0], ast.NotEq) \
and not isinstance(node.ops[0], ast.Is):
raise BadAss("unexpected compare context: %s" % dump(node) )
elif len(node.comparators) != 1:
raise BadAss("expected 1 comparator: %s" % dump(node) )
class IsPolySwitch(PolySwitch):
def no_match(self, name_typ_list):
return ast.Num(0)
def int_int(self, l, r):
return simple_compare(ProjectToInt(arg=l), ProjectToInt(arg=r))
def bool_bool(self, l, r):
return simple_compare(ProjectToBool(arg=l), ProjectToBool(arg=r))
def big_big(self, l, r):
return simple_compare(ProjectToBig(arg=l), ProjectToBig(arg=r))
#end IsPolySwitch
class CmpPolySwitch(IsPolySwitch):
def int_bool(self, l, r):
return simple_compare(ProjectToInt(arg=l), ProjectToBool(arg=r))
def bool_int(self, l, r):
return simple_compare(ProjectToBool(arg=l), ProjectToInt(arg=r))
def big_big(self, l, r):
return make_call(
'equal',
[ ProjectToBig(arg=l), ProjectToBig(arg=r) ]
)
l_name = self.gen_name()
comp_name = self.gen_name()
ps = IsPolySwitch() if isinstance(node.ops[0], ast.Is) else CmpPolySwitch()
result = let_env(
self.gen_name,
lambda names: InjectFromBool(arg=polyswitch(ps, var_ref(names[0]), var_ref(names[1]))),
pyc_vis.visit(self, node.left),
pyc_vis.visit(self, node.comparators[0])
)
if isinstance(node.ops[0], ast.NotEq):
return InjectFromBool(arg=ast.UnaryOp(
op = ast.Not(),
operand = IsTrue(arg=result)
))
return result
def visit_Call(self, node):
args = [pyc_vis.visit(self, n) for n in node.args]
if isinstance(node.func, ast.Name) \
and node.func.id in set(['input']):
return InjectFromInt(arg=make_call('input', args) )
else:
return self.make_user_call(node)
#yes, this is ugly T_T
#this could be made much cleaner if runtime.c were rewritten in a
#smarter way
def make_user_call(self, node):
obj_name = self.gen_name()
arg_nodes = []
for n in node.args:
arg_nodes.append(pyc_vis.visit(self, n))
return let_env(
self.gen_name,
lambda names: ast.IfExp(
test = simple_compare(
ast.Num(0),
IsClass(arg=var_ref(names[0]))
),
body = ast.IfExp(
test = simple_compare(
ast.Num(0),
IsBoundMethod(arg=var_ref(names[0]))
),
body = ast.IfExp(
test = simple_compare(
ast.Num(0),
IsUnboundMethod(arg=var_ref(names[0]))
),
body = UserCall( #just a normal function call
func = var_ref(names[0]),
args = [var_ref(name) for name in names[1:] ],
kwargs = None,
starargs = None
),
orelse = UserCall( #unbound method call: get function and call
func = InjectFromBig(arg=GetFunction(arg=var_ref(names[0]))),
args = [var_ref(name) for name in names[1:] ],
kwargs = None,
starargs = None
)
),
orelse = UserCall( #bound method call: get function, receiver and call
func = InjectFromBig(arg=GetFunction(arg=var_ref(names[0]))),
args = [InjectFromBig(arg=GetReceiver(arg=var_ref(names[0])))] \
+ [var_ref(name) for name in names[1:] ],
kwargs = None,
starargs = None
)
),
orelse = Let( #object creation: create and call __init__ if exists
name = var_set(obj_name),
rhs = InjectFromBig(arg=CreateObject(arg=var_ref(names[0]))),
body = ast.IfExp(
test = simple_compare(
ast.Num(0),
HasAttr(obj=var_ref(names[0]), attr=ast.Str('__init__'))
),
body = var_ref(obj_name), #no __init__, return object
orelse = Seq( #call __init__, return object
body = [
UserCall(
func = InjectFromBig(arg=GetFunction(
arg = ast.Attribute(
value = var_ref(names[0]),
attr = '__init__',
ctx = ast.Load()
)
)),
args = [ #(object, arg1, ..., argn)
var_ref(name) for name in ([obj_name] + names[1:])
],
kwargs = None,
starargs = None
), #call __init__
var_ref(obj_name)
] #body
) #hasattr('__init__') true
) #if hasattr('__init__')
) #let o = CreateObject(names[0])
),
pyc_vis.visit(self, node.func),
*arg_nodes
)
def visit_Dict(self, node):
d_name = self.gen_name()
elements = []
for (k,v) in zip(node.keys, node.values):
elements.append(make_assign(
ast.Subscript(
value = var_ref(d_name),
slice = ast.Index(pyc_vis.visit(self, k)),
ctx = ast.Store()
),
pyc_vis.visit(self, v))
)
return Let(
name = var_set(d_name),
rhs = InjectFromBig(
arg = DictRef()
),
body = Seq(body = elements + [var_ref(d_name)])
)
def visit_List(self, node):
if not isinstance(node.ctx, ast.Load):
raise BadAss("unexpected context for list: %s" % (ast.dump(node)) )
list_name = self.gen_name()
elements = []
for i in range(0, len(node.elts)):
e = node.elts[i]
elements.append(make_assign(
ast.Subscript(
value = var_ref(list_name),
slice = ast.Index(
InjectFromInt(arg=ast.Num(n=i))
),
ctx = ast.Store()
),
pyc_vis.visit(self, e))
)
return Let(
name = var_set(list_name),
rhs = InjectFromBig(
arg = ListRef(
size = InjectFromInt(arg = ast.Num(n=len(node.elts) ) )
)
),
body = Seq(body = elements + [var_ref(list_name)])
)
def visit_ClassRef(self, node):
return InjectFromBig(
arg = ClassRef(
bases = pyc_vis.visit(self, node.bases)
)
)
def visit_BinOp(self, node):
def unknown_op(node, *args):
raise Exception("unsupported BinOp: %s" % ast.dump(node))
return pyc_vis.dispatch_to_prefix(
self,
'visit_BinOp_',
unknown_op,
node.op,
node
)
def visit_BinOp_Add(self, dummy, node):
class AddPolySwitch(PolySwitch):
def no_match(self, name_typ_list):
return make_error(
"cant add %s to %s" % (
name_typ_list[1][1],
name_typ_list[0][1]
)
)
def add_bools_or_ints(self, l, r):
return ast.BinOp(left = l, op = ast.Add(), right = r)
#int, bool => int, cast(bool, int)
def int_int(self, l, r):
return InjectFromInt(
arg = self.add_bools_or_ints(ProjectToInt(arg=l), ProjectToInt(arg=r))
)
def int_bool(self, l, r):
return InjectFromInt(
arg = self.add_bools_or_ints(ProjectToInt(arg=l), CastBoolToInt(arg=ProjectToBool(arg=r)))
)
def bool_bool(self, l, r):
return InjectFromInt(
arg = self.add_bools_or_ints(
CastBoolToInt(arg=ProjectToBool(arg=l)),
CastBoolToInt(arg=ProjectToBool(arg=r))
)
)
def bool_int(self, l, r):
return InjectFromInt(
arg = self.add_bools_or_ints(
CastBoolToInt(arg=ProjectToBool(arg=l)),
ProjectToInt(arg=r)
)
)
def big_big(self, l, r):
return InjectFromBig(
arg = make_call(
"add",
[ProjectToBig(arg=l), ProjectToBig(arg=r)]
)
)
#AddPolyswitch
return let_env(
self.gen_name,
lambda names: polyswitch(AddPolySwitch(), var_ref(names[0]), var_ref(names[1])),
pyc_vis.visit(self, node.left),
pyc_vis.visit(self, node.right)
)
def visit_BoolOp(self, node):
def unknown_op(node, *args):
raise Exception("unsupported BoolOp: %s" % ast.dump(node))
return pyc_vis.dispatch_to_prefix(
self,
'visit_BoolOp_',
unknown_op,
node.op,
node
)
def visit_BoolOp_And(self, dummy, node):
if len(node.values) != 2:
raise BadAss("expected 2 operands to bool op: %s" % ast.dump(node))
return let(
name_gen = self.gen_name,
rhs = pyc_vis.visit(self, node.values[0]),
body = lambda name: ast.IfExp(
test = make_is_true(name),
body = pyc_vis.visit(self, node.values[1]),
orelse = var_ref(name)
)
)
def visit_BoolOp_Or(self, dummy, node):
if len(node.values) != 2:
raise BadAss("expected 2 operands to bool op: %s" % ast.dump(node))
return let(
name_gen = self.gen_name,
rhs = pyc_vis.visit(self, node.values[0]),
body = lambda name: ast.IfExp(
test = make_is_true(name),
body = var_ref(name),
orelse = pyc_vis.visit(self, node.values[1])
)
)
def visit_FunctionDef(self, node):
return make_assign(
var_set(node.name),
Bloc(
args = pyc_vis.visit(self, node.args),
body = [pyc_vis.visit(self, n) for n in node.body],
klass = ast.FunctionDef
)
)
def visit_Lambda(self, node):
return Bloc(
args = pyc_vis.visit(self, node.args),
body = [ast.Return(
value = pyc_vis.visit(self, node.body)
)],
klass = ast.Lambda
)
def txform(astree, **kwargs):
v = AstToIRTxformer()
#v.log = log
if 'tracer' in kwargs:
v.tracer = kwargs['tracer']
return pyc_vis.walk(v, astree)
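# Hedged usage sketch (not part of pyc): txform expects an already parsed
# Python AST, so one plausible driver builds it with the stdlib ast module.
# The sample source string and the absence of a tracer are assumptions made
# only for illustration; the real compiler front end may drive this
# differently (e.g. via pyc_parser).
if __name__ == '__main__':
    demo_tree = ast.parse("print 1 + 1")
    demo_ir = txform(demo_tree)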
|
cantora/pyc
|
pyc_ir.py
|
Python
|
gpl-3.0
| 13,196
|
[
"VisIt"
] |
ec8593d653e3915b1d98fcb49b2d84487603089ddd79a790d4038c0986263dec
|
#!/usr/bin/env python
import randopt as ro
import time
def loss(x):
# time.sleep(1)
return x**2
if __name__ == '__main__':
e = ro.Experiment('simple_example', {
'alpha': ro.Gaussian(mean=0.0, std=1.0, dtype='float'),
})
# Sampling parameters
for i in range(100):
e.sample('alpha')
res = loss(e.alpha)
print('Result: ', res)
e.add_result(res)
# Manually setting parameters
e.alpha = 0.00001
res = loss(e.alpha)
e.add_result(res)
# Search over all experiments results, including ones from previous runs
opt = e.minimum()
print('Best result: ', opt.result, ' with params: ', opt.params)
|
seba-1511/randopt
|
examples/simple.py
|
Python
|
apache-2.0
| 690
|
[
"Gaussian"
] |
7958d5006bd1e76c743b1f623c7ffae06d6029b3e31e55920f4a6f4f06c54789
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from scipy import stats
from pandas.tools.plotting import scatter_matrix
from scipy.optimize import curve_fit
from matplotlib.colors import LogNorm
df = pd.read_csv('/Users/tylern/Homework/PHYS723/project/LHC/CMS_data/MuRun.csv')
#Make sure events are neutral
#if first event is positive and the second is negative
#or the second is positive and the first is negative
df1 = df[df.Q1 == 1]
df1 = df1[df1.Q2 == -1]
df2 = df[df.Q1 == -1]
df2 = df2[df2.Q2 == 1]
frames = [df1, df2]
df = pd.concat(frames)
df = df[df.Type1 == 'G']
df = df[df.Type2 == 'G']
#df = df[np.sqrt(df.px1**2 + df.py1**2) + np.sqrt(df.px2**2 + df.py2**2) < 50]
mass_Up = 9.45
def poly(x, c1, c2, c3, c4):
return c1*x*x*x + c2*x*x + c3*x + c4
def big_poly(x, c1, c2, c3, c4, c5, c6, c7, c8):
return c8*x**7 + c7*x**6 + c6*x**5 + c5*x**4 + c4*x**3 + c3*x**2 + c2*x + c1
def gaussian(x, mu, sig, const):
    # scaled normal density with mean mu and standard deviation sig
    return const * 1/(sig*np.sqrt(2*np.pi)) * np.exp(-(x - mu)**2 / (2*sig**2))
def gaus_poly(x, mu, sig, cont, c1, c2, c3, c4):
return poly(x, c1, c2, c3, c4) + gaussian(x, mu, sig, cont)
def big_poly_gaus(x, mu, sig, cont, c1, c2, c3, c4, c5, c6, c7, c8):
return gaussian(x, mu, sig, cont) + big_poly(x, c1, c2, c3, c4, c5, c6, c7, c8)
def chi_2(ys, yknown):
    # reduced chi-square between fitted values ys and observed bin counts yknown;
    # empty bins contribute 1 so the division never blows up
    total = 0
    for i in xrange(len(yknown)):
        temp = (ys[i] - yknown[i])**2.0
        if yknown[i] == 0:
            total += 1
        else:
            total += temp / yknown[i]
    return total / len(yknown)
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
upsilon = df[df.M < 14]
upsilon = upsilon[upsilon.M > 6]
mass = upsilon.M
num_bins = 400
hist, bin_edges = np.histogram(mass,bins=num_bins)
xdata = 0.5*(bin_edges[1:]+bin_edges[:-1])
ydata = hist
plt.hist(mass, num_bins, histtype=u'stepfilled',facecolor='g' , alpha=0.45)
popt_1, pcov_1 = curve_fit(poly, xdata, ydata)
x0 = np.array([9.45,10.7,1,popt_1[0],popt_1[1],popt_1[2],popt_1[3]])
popt_1, pcov_1 = curve_fit(gaus_poly, xdata, ydata,p0=x0)
c2 = chi_2(gaus_poly(xdata, *popt_1),ydata)
plt.plot(xdata,gaus_poly(xdata,*popt_1),'b--', lw=4,
label=r'$\mathrm{Poly\ bkg\ gaus\ peak\ : \ \chi^{2} = %.4f}$' %(c2))
plt.plot(xdata,poly(xdata,*popt_1[3:]),'g--', lw=4)
signal_line = lambda x : gaus_poly(x,*popt_1) - poly(x, *popt_1[3:])
signal = []
for i in xrange(num_bins):
temp = ydata[i] - signal_line(xdata[i])
signal.append(temp)
signal = []
for i in xrange(num_bins):
temp = ydata[i] - poly(xdata[i],*popt_1[3:])
signal.append(temp)
plt.xlim((np.min(xdata),np.max(xdata)))
plt.legend(loc=0)
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.savefig('U_hist.pdf')
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
ydata = signal
plt.scatter(xdata,ydata,marker='o',color='g')
popt_1, pcov_1 = curve_fit(gaussian, xdata, ydata,p0=[9.45,12,1])
perr_1 = np.sqrt(np.diag(pcov_1))
plt.plot(xdata,gaussian(xdata,*popt_1),'g-', lw=4,
label=r'$\mathrm{Mass=%.4f \pm %.4f \ GeV,\ \Gamma=%.4f \pm %.4f \ GeV}$'
%(popt_1[0], perr_1[0], popt_1[1]*(2.0*np.sqrt(2.0 * np.log(2))), perr_1[1]))
mean,width = popt_1[0],popt_1[1]
sigma = 0.20/3.0 #width*(2.0*np.sqrt(2.0 * np.log(2)))
plt.axvline(x=(mean - 3.0*sigma),color='g')
plt.axvline(x=(mean + 3.0*sigma),color='g')
mean_U = mean
sigma_U = sigma
plt.xlim((np.min(xdata),np.max(xdata)))
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.legend(loc=0)
plt.savefig('U_peak.pdf')
signal1 = []
for i in xrange(num_bins):
temp = ydata[i] - gaussian(xdata[i],*popt_1)
signal1.append(temp)
ydata = signal1
plt.scatter(xdata, signal1,marker='o', color='b')
popt_1, pcov_1 = curve_fit(gaussian, xdata, ydata, p0=[10,10.7,1],maxfev=8000)
perr_1 = np.sqrt(np.diag(pcov_1))
plt.plot(xdata,gaussian(xdata,*popt_1),'b', lw=4,
label=r'$\mathrm{Mass=%.4f \pm %.4f \ GeV,\ \Gamma=%.4f \pm %.4f}$'
%(popt_1[0], perr_1[0], popt_1[1]*(2.0*np.sqrt(2.0 * np.log(2))), perr_1[1]))
mean,width = popt_1[0],popt_1[1]
sigma = 0.30/3.0 #width*(2.0*np.sqrt(2.0 * np.log(2)))
mean_Up = mean
sigma_Up = sigma
plt.axvline(x=(mean - 3.0*sigma),color='b')
plt.axvline(x=(mean + 3.0*sigma),color='b')
plt.xlim((np.min(xdata),np.max(xdata)))
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.legend(loc=0)
plt.savefig('Up_peak.pdf')
'''
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
signal1 = []
for i in xrange(num_bins):
temp = ydata[i] - gaussian(xdata[i],*popt_1)
signal1.append(temp)
ydata = signal1
plt.scatter(xdata, signal1,marker='o', color='b')
popt_1, pcov_1 = curve_fit(gaussian, xdata, ydata, p0=[10,10.7,1],maxfev=80000)
perr_1 = np.sqrt(np.diag(pcov_1))
plt.plot(xdata,gaussian(xdata,*popt_1),'b', lw=4,
label=r'$\mathrm{Mass=%.4f \pm %.4f \ GeV,\ \Gamma=%.4f \pm %.4f}$'
%(popt_1[0], perr_1[0], popt_1[1]*(2.0*np.sqrt(2.0 * np.log(2))), perr_1[1]))
mean,width = popt_1[0],popt_1[1]
sigma = 0.30/3.0 #width*(2.0*np.sqrt(2.0 * np.log(2)))
mean_Up = mean
sigma_Up = sigma
plt.axvline(x=(mean - 3.0*sigma),color='b')
plt.axvline(x=(mean + 3.0*sigma),color='b')
plt.xlim((np.min(xdata),np.max(xdata)))
plt.xlabel(r'Mass (GeV)', fontsize=20)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.legend(loc=0)
plt.savefig('Up_peak.pdf')
'''
Up = df[df.M > (mean_Up - 3.0*sigma_Up)]
Up = Up[Up.M < (mean_Up + 3.0*sigma_Up)]
Up['Upx'] = Up.px1+Up.px2
Up['Upy'] = Up.py1+Up.py2
Up['Upz'] = Up.pz1+Up.pz2
Up['Upt'] = np.sqrt(np.square(Up.Upx) + np.square(Up.Upy))
Up['UE'] = Up.E1+Up.E2
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upt < 120]
temp = temp[temp.UE < 150]
plt.hist2d(temp.UE,temp.Upt,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('Ue_Upt_log.pdf')
#########################################
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upt < 30]
temp = temp[temp.UE < 30]
plt.hist2d(temp.UE,temp.Upt,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('Ue_Upt_log_2.pdf')
#########################################
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upt < 120]
temp = temp[temp.UE < 150]
plt.hist2d(temp.UE,temp.Upt,bins=200,cmap='viridis')#,norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('Ue_Upt.pdf')
#########################################
#########################################
#fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
#temp = Up.drop(['Event','Run','Type1','Type2'],axis=1)
#temp = temp.drop(['E1','px1','py1','pz1','pt1','eta1','phi1','Q1'],axis=1)
#temp = temp.drop(['E2','px2','py2','pz2','pt2','eta2','phi2','Q2'],axis=1)
#scatter_matrix(temp, alpha=0.1, figsize=(20, 15),diagonal='kde')
#plt.savefig('scatter_matrix.jpg')
#########################################
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[Up.Upz < 120]
temp = temp[temp.UE < 150]
plt.hist2d(temp.UE,temp.Upz,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Z Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UE_Upz.pdf')
#########################################
UPp = df[df.M > (mean_U - 3.0*sigma_U)]
UPp = UPp[UPp.M < (mean_U + 3.0*sigma_U)]
UPp['UPpx'] = UPp.px1+UPp.px2
UPp['UPpy'] = UPp.py1+UPp.py2
UPp['UPpz'] = UPp.pz1+UPp.pz2
UPp['UPpt'] = np.sqrt(np.square(UPp.UPpx) + np.square(UPp.UPpy))
UPp['UpE'] = UPp.E1+UPp.E2
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = UPp[UPp.UPpt < 120]
temp = temp[temp.UpE < 150]
plt.hist2d(temp.UpE,temp.UPpt,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UpE_UPpt_log.pdf')
#########################################
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = UPp[UPp.UPpt < 120]
temp = temp[temp.UpE < 150]
plt.hist2d(temp.UpE,temp.UPpt,bins=200,cmap='viridis')#,norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Transverse Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UpE_UPpt.pdf')
#########################################
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = UPp[UPp.UPpz < 120]
temp = temp[temp.UpE < 150]
plt.hist2d(temp.UpE,temp.UPpz,bins=200,cmap='viridis',norm=LogNorm())
plt.xlabel(r'Energy (GeV)', fontsize=20)
plt.ylabel(r'Z Momentum (GeV)', fontsize=20)
plt.colorbar()
plt.savefig('UE_UPpz.pdf')
#########################################
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[np.abs(Up.Upz) < 200]
plt.hist(temp.Upz, 100, histtype=u'stepfilled',facecolor='b' , alpha=0.45)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.xlabel(r'Z Momentum (GeV)', fontsize=20)
#plt.colorbar()
plt.savefig('Upz.pdf')
#########################################
#########################################
fig = plt.figure(num=None, figsize=(16,9), dpi=200, facecolor='w', edgecolor='k')
temp = Up[np.abs(Up.Upt) < 20]
plt.hist(temp.Upt, 100, histtype=u'stepfilled',facecolor='b' , alpha=0.45)
plt.ylabel(r'Counts (#)', fontsize=18)
plt.xlabel(r'Transverse Momentum (GeV)', fontsize=20)
#plt.colorbar()
plt.savefig('Upt.pdf')
#########################################
|
tylern4/tylern4.github.io
|
OpenData/Upsilon/Upsilon.py
|
Python
|
mit
| 10,224
|
[
"Gaussian"
] |
beef99a835e4d4d261bf0a850d32e899263e48fc4ede2d341165ddde9fb0e69c
|
#!/usr/bin/env python
import sys
def convert(filename):
lines = open(filename).readlines()
t1 = ''.join(lines)
first = True
for i in range(len(lines)):
line = lines[i]
if line.startswith('from ASE'):
if first:
lines[i] = 'from ase import *\n'
first = False
else:
lines[i] = ''
t = ''.join(lines)
for old, new in [('GetCartesianPositions', 'get_positions'),
('SetCartesianPositions', 'set_positions'),
('GetPotentialEnergy', 'get_potential_energy'),
('SetCalculator', 'set_calculator'),
('GetScaledPositions', 'get_scaled_positions'),
('SetScaledPositions', 'set_scaled_positions'),
('SetUnitCell', 'set_cell'),
('GetUnitCell', 'get_cell'),
('GetBoundaryConditions', 'get_pbc'),
('GetCartesianForces', 'get_forces'),
('GetCartesianVelocities', 'get_velocities'),
('SetCartesianVelocities', 'set_velocities'),
('GetCartesianMomenta', 'get_momenta'),
('SetCartesianMomenta', 'set_momenta'),
('ListOfAtoms', 'Atoms'),
('periodic', 'pbc'),
('pbcity', 'periodicity'),
('.Converge(', '.run('),
('Repeat', 'repeat'),
('Numeric', 'numpy'),
('numpyal', 'Numerical'),
('GetAtomicNumber()', 'number'),
('GetChemicalSymbol()', 'symbol'),
('GetCartesianPosition()', 'position'),
('GetTag()', 'tag'),
('GetCharge()', 'charge'),
('GetMass()', 'mass'),
('GetCartesianMomentum()', 'momentum'),
('GetMagneticMoment()', 'magmom'),
]:
t = t.replace(old, new)
t2 = ''
while 1:
i = t.find('.')
i2 = t.find('def ')
if 0 <= i < i2:
n = 1
elif i2 != -1:
n = 4
i = i2
else:
break
t2 += t[:i + n]
t = t[i + n:]
if t[0].isupper() and t[1].islower():
j = t.find('(')
if j != -1 and t[2: j].isalpha():
for k in range(j):
if t[k].isupper() and k > 0:
t2 += '_'
t2 += t[k].lower()
t = t[j:]
t2 += t
if t2 != t1:
print filename, len(t1) - len(t2)
open(filename + '.bak', 'w').write(t1)
open(filename, 'w').write(t2)
for filename in sys.argv[1:]:
convert(filename)
|
freephys/python_ase
|
tools/ASE2ase.py
|
Python
|
gpl-3.0
| 2,843
|
[
"ASE"
] |
890a5825faa7974bf1f8937f31db3c76fa01f446a1237bfeac2c19263a10d45f
|
"""
Copyright (C) 2015, Jaguar Land Rover
This program is licensed under the terms and conditions of the
Mozilla Public License, version 2.0. The full text of the
Mozilla Public License is at https://www.mozilla.org/MPL/2.0/
Maintainer: Rudolf Streif (rstreif@jaguarlandrover.com)
Author: Anson Fan (afan1@jagualandrover.com)
"""
import datetime, pytz
from django.utils import timezone
from django.db import models
from django.core.exceptions import ValidationError
from vehicles.models import Vehicle
from tasks import notify_update
from tasks import terminate_agent
# Validators
def validate_upd_timeout_da(timeout):
if timeout < 0:
raise ValidationError("Timeout must be a positive number.")
def validate_upd_retries_da(retries):
if retries <= 0:
raise ValidationError("Retries must be 1 or larger.")
class Status:
"""
Status values for Update and Retry
"""
PENDING = "PE"
STARTED = "ST"
RUNNING = "RU"
ABORTED = "AB"
SUCCESS = "SU"
FAILED = "FA"
WAITING = "WA"
REJECTED = "RE"
TERMINATED = "TD"
class Agent(models.Model):
"""
Software package description
"""
pac_name_da = models.CharField('Agent Name', max_length=256)
pac_description_da = models.TextField('Agent Description', null=True, blank=True)
pac_version_da = models.CharField('Agent Version', max_length=256)
pac_file_da = models.FileField('Agent File')
pac_start_cmd = models.TextField('Agent Launch Command')
def get_name(self):
"""
Returns the package name.
"""
return self.pac_name_da
def __unicode__(self):
"""
Returns the package name.
"""
return self.pac_name_da
class UpdateDA(models.Model):
"""
Update description
    An Update is defined by a vehicle and a software package that is to
    be sent to that vehicle.
"""
UPDATE_STATUS = (
(Status.PENDING, "Pending"),
(Status.STARTED, "Started"),
(Status.RUNNING, "Running"),
(Status.ABORTED, "Aborted"),
(Status.SUCCESS, "Success"),
(Status.FAILED, "Failed"),
(Status.WAITING, "Waiting"),
(Status.REJECTED, "Rejected"),
(Status.TERMINATED, "Terminated"),
)
upd_vehicle_da = models.ForeignKey(Vehicle, verbose_name='Vehicle')
upd_package_da = models.ForeignKey(Agent, verbose_name='Agent')
upd_status_da = models.CharField('Update Status',
max_length=2,
choices=UPDATE_STATUS,
default=Status.PENDING)
upd_expiration = models.DateTimeField('Valid Until')
upd_retries_da = models.IntegerField('Maximum Retries', validators=[validate_upd_retries_da], default="0")
@property
def upd_status_da_text(self):
return dict(self.UPDATE_STATUS)[self.upd_status_da]
def __unicode__(self):
"""
Returns the Update name.
"""
return self.update_name()
def update_name(self):
"""
Returns the Update name composed of
<package name> on <vehicle>.
"""
return ("'" +
self.upd_package_da.get_name() +
"' on '" +
self.upd_vehicle_da.get_name() +
"'"
)
def not_expired(self):
"""
Returns 'True' if this Update is not expired.
"""
return (timezone.now() <= self.upd_expiration)
not_expired.short_description = 'Not Expired'
    not_expired.admin_order_field = 'upd_expiration'
not_expired.boolean = True
def retry_count(self):
"""
Returns the number of Retry objects associated with this
Update.
"""
return RetryDA.objects.filter(ret_update_da=self).count()
retry_count.short_description = "Retry Count"
def active_retries(self):
"""
Returns a list with all active Retry objects associated with
the Update. A Retry is active if its status is PENDING, STARTED,
RUNNING or WAITING.
"""
        return RetryDA.objects.filter(ret_update_da=self,
                                      ret_status_da__in=[Status.PENDING, Status.STARTED,
                                                         Status.RUNNING, Status.WAITING]
                                      )
def start(self):
"""
Start the update (send it to the vehicle).
Returns the Retry object that has been created to manage the
update process.
"""
if self.upd_status_da in [Status.PENDING, Status.ABORTED, Status.FAILED]:
retry = RetryDA(ret_update_da=self,
ret_start_da=timezone.now(),
ret_timeout_da=self.upd_expiration,
ret_status_da=Status.PENDING
)
retry.save()
self.upd_status_da = Status.STARTED
self.save()
notify_update(retry)
return retry
else:
return None
def terminate(self):
retry = RetryDA(ret_update_da=self,
ret_start_da=timezone.now(),
ret_timeout_da=self.upd_expiration,
ret_status_da=Status.PENDING
)
retry.save()
self.upd_status_da = Status.STARTED
self.save()
terminate_agent(retry)
return retry
def abort(self):
"""
Abort the Update and currently running Retry.
Returns the Retry object handling the update.
"""
if self.upd_status_da in [Status.STARTED, Status.RUNNING, Status.WAITING]:
retries = self.active_retries()
retry = None
if retries:
retry = retries[0]
retry.set_status(Status.ABORTED)
self.set_status(Status.ABORTED)
return retry
else:
return None
def delete(self):
"""
Delete this Update object.
Update objects can only be deleted if they are not currently
active.
"""
if not self.upd_status_da in [Status.STARTED, Status.RUNNING, Status.WAITING]:
            super(UpdateDA, self).delete()
def set_status(self, status):
"""
Set the status of this Update object.
"""
self.upd_status_da = status
self.save()
class RetryDA(models.Model):
"""
Retry objects handle the actual update. They represent the state
of the update. Messages are logged against a Retry. That allows
comparing update attempts in case of failures etc.
"""
RETRY_STATUS = (
(Status.PENDING, "Pending"),
(Status.STARTED, "Started"),
(Status.RUNNING, "Running"),
(Status.ABORTED, "Aborted"),
(Status.SUCCESS, "Success"),
(Status.FAILED, "Failed"),
(Status.WAITING, "Waiting"),
(Status.REJECTED, "Rejected"),
(Status.TERMINATED, "Terminated"),
)
ret_update_da = models.ForeignKey(UpdateDA, verbose_name='Update')
ret_start_da = models.DateTimeField('Retry Started')
ret_timeout_da = models.DateTimeField('Retry Valid', null=True, blank=True)
ret_finish_da = models.DateTimeField('Retry Finished', null=True, blank=True)
ret_status_da = models.CharField('Retry Status',
max_length=2,
choices=RETRY_STATUS,
default=Status.PENDING)
class Meta:
verbose_name_plural = "Retries"
def __unicode__(self):
"""
Returns the name of this Retry which is composed of the name
of the Update and the start date/time of the Retry.
"""
return self.ret_update_da.update_name() + " " + self.ret_start_da.strftime("%Y-%m-%d %H:%M:%S")
def delete(self):
"""
Deletes this Retry. Deleting is only possible if the Retry is not
currently active.
"""
if not self.ret_status_da in [Status.STARTED, Status.RUNNING, Status.WAITING]:
super(RetryDA, self).delete()
def set_status(self, status):
"""
Set the status of this Retry.
"""
self.ret_status_da = status
if status in [Status.ABORTED, Status.SUCCESS, Status.FAILED, Status.REJECTED]:
self.ret_finish_da = timezone.now()
self.save()
def get_timeout_epoch(self):
"""
Returns this Retry's timeout in seconds since epoch (1970/01/01)
"""
return (self.ret_timeout_da.astimezone(pytz.UTC) - datetime.datetime(1970,1,1,tzinfo=pytz.UTC)).total_seconds()
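# Editor's sketch (not part of the original module): the docstrings above describe the
# intended lifecycle of an UpdateDA and its RetryDA objects. The helper below is a
# minimal illustration of that flow; the one-day expiration window and the retry count
# are illustrative assumptions, not project defaults.
def example_update_lifecycle(vehicle, agent):
    """Create an UpdateDA for `vehicle`/`agent`, start it, then abort it again."""
    update = UpdateDA(upd_vehicle_da=vehicle,
                      upd_package_da=agent,
                      upd_expiration=timezone.now() + datetime.timedelta(days=1),
                      upd_retries_da=1)
    update.save()
    retry = update.start()   # creates a RetryDA and notifies the vehicle
    if retry is not None:
        update.abort()       # marks both the Update and the active Retry as ABORTED
    return update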
|
afan1/rvi_backend
|
web/dynamicagents/models.py
|
Python
|
mpl-2.0
| 8,875
|
[
"Jaguar"
] |
0c622175db833de4deb0fa0935f384a534a23baf99180a6291bd45831bbbc1dc
|
import sys
tests = [("testExecs/main.exe", "", {}), ]
longTests = []
if __name__ == '__main__':
import sys
from rdkit import TestRunner
failed, tests = TestRunner.RunScript('test_list.py', 0, 1)
sys.exit(len(failed))
|
rvianello/rdkit
|
Code/Numerics/EigenSolvers/test_list.py
|
Python
|
bsd-3-clause
| 228
|
[
"RDKit"
] |
9758b5338bd10e2c197d4e88056bee3af56a96f4570e9206e2ff642bad0c4cbe
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name='pages/home.html'), name="home"),
url(r'^about/$', TemplateView.as_view(template_name='pages/about.html'), name="about"),
# Django Admin
url(r'^admin/', include(admin.site.urls)),
# User management
url(r'^users/', include("sendprism.users.urls", namespace="users")),
url(r'^accounts/', include('allauth.urls')),
# Your stuff: custom urls includes go here
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development; just visit
    # these urls in a browser to see what these error pages look like.
urlpatterns += [
url(r'^400/$', 'django.views.defaults.bad_request'),
url(r'^403/$', 'django.views.defaults.permission_denied'),
url(r'^404/$', 'django.views.defaults.page_not_found'),
url(r'^500/$', 'django.views.defaults.server_error'),
]
|
Ibtiss4m/sendprismApp
|
config/urls.py
|
Python
|
bsd-3-clause
| 1,230
|
[
"VisIt"
] |
3af7313588b36eeb3543ccb8d6b45a2b4411d7d0d3139fd8335e99e62257b5c1
|
"""
Thesaurus-API
~~~~~~~~~~~~~
An api for thesaurus.com. See the README for instructions.
A pythonic poem authored by Robert.
Inspiration and help from others (see credits).
If there's anything in here you don't understand or want me to change, just
make an issue or send me an email at robert <at> robertism <dot> com. Thanks :)
"""
from collections import namedtuple
import json
import requests
from bs4 import BeautifulSoup
from .exceptions import (
WordNotFoundError, ThesaurusRequestError, MisspellingError
)
# =========================== GLOBAL CONSTANTS =============================
ALL = 'all'
## form=
FORM_INFORMAL = 'informal'
FORM_COMMON = 'common'
# TODO: also include nltk pos_tagger constants
## partOfSpeech=
POS_ADJECTIVE, POS_ADJ = 'adj', 'adj'
POS_ADVERB, POS_ADV = 'adv', 'adv'
POS_CONTRACTION, POS_CONT = 'contraction', 'contraction'
POS_CONJUNCTION, POS_CONJ = 'conj', 'conj'
POS_DETERMINER, POS_DET = 'determiner', 'determiner'
POS_INTERJECTION, POS_INTERJ = 'interj', 'interj'
POS_NOUN = 'noun'
POS_PREFIX = 'prefix'
POS_PREPOSITION, POS_PREP = 'prep', 'prep'
POS_PRONOUN, POS_PRON = 'pron', 'pron'
POS_VERB = 'verb'
POS_ABBREVIATION, POS_ABB = 'abb', 'abb'
POS_PHRASE = 'phrase'
POS_ARTICLE = 'article'
# ========================= END GLOBAL CONSTANTS ===========================
def formatWordUrl(inputWord):
"""Format our word in the url. I could've used urllib's quote thing, but
this is more efficient I think. Let me know if there's a word it doesn't
work for and I'll change it.
"""
url = 'https://www.thesaurus.com/browse/'
url = url + inputWord.strip().lower().replace(' ', '%20')
return url
def btw(inputString, lh, rh):
"""Extract a string between two other strings."""
return inputString.split(lh, 1)[1].split(rh, 1)[0]
def fetchWordData(inputWord):
"""Downloads the data thesaurus.com has for our word.
Parameters
----------
inputWord : str
The word you are searching for on thesaurus.com
Returns
-------
list of dict
A list of n+1 dictionaries, where n is the number of definitions for the
word, and the last dictionary holds information on word origin and
example sentences.
Each definition dict is of the form:
{
'meaning' : str,
'partOfSpeech' : str,
'isVulgar' : bool,
'syn' : [Entry(
word=str,
relevance=int,
length=int,
complexity=int,
form=str
)],
'ant' : [... same as 'syn' ...]
}
where `Entry` is a namedtuple.
"""
url = formatWordUrl(inputWord)
# Try to download the page source, else throw an error saying we couldn't
# connect to the website.
try:
r = requests.get(url)
except Exception as e:
raise ThesaurusRequestError(e)
soup = BeautifulSoup(r.content, 'html.parser')
# The site didn't have this word in their collection.
if '/noresult' in r.url:
raise WordNotFoundError(inputWord)
# Traverse the javascript to find where they embedded our data. It keeps
# changing index. It used to be 12, now it's 15. Yay ads and tracking!
data = soup.select('script')
for d in reversed(data):
if d.text[0:20] == 'window.INITIAL_STATE':
data = d.text[23:-1] # remove 'window.INITIAL_STATE = ' and ';'
data = json.loads(data)
break
# Disambiguation. They believe we've misspelled it, and they're providing us
# with potentially correct spellings. Only bother printing the first one.
if '/misspelling' in r.url:
# TODO: Should we include a way to retrieve this data?
otherWords = data.get('searchData', {}).get('spellSuggestionsData', [])
if not otherWords:
raise MisspellingError(inputWord, '')
else:
raise MisspellingError(inputWord, otherWords[0].get('term'))
defns = [] # where we shall store data for each definition tab
# how we will represent an individual synonym/antonym
Entry = namedtuple('Entry', ['word', 'relevance', 'length',
'complexity', 'form'])
## Utility functions to process attributes for our entries.
# a syn/ant's relevance is marked 1-3, where 10 -> 1, 100 -> 3.
calc_relevance = lambda x: [None, 10, 50, 100].index(x)
calc_length = lambda x: 1 if x < 8 else 2 if x < 11 else 3
calc_form = lambda x: 'informal' if x is True else 'common'
# iterate through each definition tab, extracting the data for the section
for defn in data['searchData']['tunaApiData']['posTabs']:
# this dict shall store the relevant data we found under the current def
curr_def = {
'partOfSpeech' : defn.get('pos'),
'meaning' : defn.get('definition'),
'isVulgar' : bool(int(defn.get('isVulgar'))),
'syn' : [],
'ant' : []
}
"""
the synonym and antonym data will each be stored as lists of tuples.
Each item in the tuple corresponds to a certain attribute of the
given syn/ant entry, and is used to filter out specific results when
Word.synonym() or Word.antonym() is called.
"""
### NOTE, TODO ###
"""
Currently, complexity is set to level == 0 as I hope it will return.
Originally, it was 1-3. In thesaurus.com's newest update, they removed
this complexity data, and made all other data difficult to locate.
I can't imagine them deleting this data... we shall see.
"""
for syn in defn.get('synonyms', []):
# tuple key is (word, relevance, length, complexity, form, isVulgar)
e = Entry(
word=syn['term'],
relevance=calc_relevance(abs(int(syn['similarity']))),
length=calc_length(len(syn['term'])),
complexity=0,
form=calc_form(bool(int(syn['isInformal'])))
# isVulgar=bool(syn['isVulgar']) # *Nested* key is useless.
)
curr_def['syn'].append(e)
for ant in defn.get('antonyms', []):
# tuple key is (word, relevance, length, complexity, form, isVulgar)
e = Entry(
word=ant['term'],
relevance=calc_relevance(abs(int(ant['similarity']))),
length=calc_length(len(ant['term'])),
complexity=0,
form=calc_form(bool(int(ant['isInformal'])))
# isVulgar=bool(ant['isVulgar']) # *Nested* key is useless.
)
curr_def['ant'].append(e)
defns.append(curr_def)
# add origin and examples to the last element so we can .pop() it out later
otherData = data['searchData']['tunaApiData']
examples = [x['sentence'] for x in otherData['exampleSentences']]
etymology = otherData['etymology']
if len(etymology) > 0:
origin = BeautifulSoup(etymology[0]['content'], "html.parser").text
## Uncomment this if you actually care about getting the ENTIRE
## origin box. I don't think you do, though.
# origin = reduce(lambda x,y: x+y, map(
# lambda z: BeautifulSoup(z['content'], "html.parser").text
# ))
else:
origin = ''
defns.append({
'examples': examples,
'origin': origin
})
return defns
class Word(object):
def __init__(self, inputWord):
"""Downloads and stores the data thesaurus.com has for a given word.
Parameters
----------
inputWord : str
The word you wish to search for on thesaurus.com
"""
# in case you want to visit it later
self.url = formatWordUrl(inputWord)
self.data = fetchWordData(inputWord)
self.extra = self.data.pop()
def __len__(self):
# returns the number of definitions the word has
return len(self.data)
### FUNCTIONS TO HELP ORGANIZE DATA WITHIN THE CLASS ###
def _filter(self, mode, defnNum='all', **filters):
"""Filter out our self.data to reflect only words with certain
attributes specified by the user. Ex: return informal synonyms that are
relevant and have many characters.
NOTE:
COMPLEXITY filter is STILL BROKEN thanks to the site's update. It will
simply be ignored for the time being.
Parameters
----------
mode : {'syn', 'ant'}
Filters through the synonyms if 'syn', or antonyms if 'ant'.
defnNum : int or 'all', optional
The word definition we are filtering data from (index of self.data).
Thus, as it is an index, it must be >= 0. If 'all' is specified,
however, it will filter through all definitions. This is the default
NOTE:
The following filters are capable of being specified as explicit values,
or lists of acceptable values. Ex: relevance=1 or relevance=[1,2].
relevance : {1, 2, 3} or list, optional
1 least relevant - 'enfeebled'
2
3 most relevant - 'elderly'
partOfSpeech : { POS_* } or list, optional
The following possible values are also defined as constants at the
beginning of the file. You can call them as: POS_ADVERB or POS_ADV.
The complete list is as follows:
adjective: 'adj'
adverb: 'adv'
contraction: 'contraction'
conjunction: 'conj'
determiner: 'determiner'
interjection: 'interj'
noun: 'noun'
prefix: 'prefix'
preposition: 'prep'
pronoun: 'pron'
verb: 'verb'
abbreviation: 'abb'
phrase: 'phrase'
article: 'article'
length : {1, 2, 3} or list, optional
1 shortest - aged
2
3 longest - experienced
complexity : {1, 2, 3} or list, optional
Reminder that this is CURRENTLY BROKEN. It will default to `None`,
no matter what values you choose.
1 least complex
2
3 most complex
form : {'informal', 'common'} or list, optional
Similar to the partOfSpeech options, these values are also defined
as constants: FORM_INFORMAL and FORM_COMMON.
Before thesaurus.com changed their code, it used to be that the
            majority of words were neither informal nor common. Thus, it wasn't
            the case that common implied not-informal. Now, however, all words
are either informal or common.
isVulgar : bool, optional
Similar to partOfSpeech, if `True`, will blank out non-vulgar
definition entries. If `False`, will filter out vulgar definitions.
Think of it as having only two different POS's to select from.
Returns
-------
list of list of str OR list of str
If defnNum is set to 'all', it will filter over all definitions, and
will return a list of list of str, where each nested list is a
single definition.
If defnNum is set to an integer, it will return a list of str, where
the str's are the filtered words for that single definition.
"""
def compare_entries(e1, e2):
if isinstance(e2, list):
if None in e2:
return True
else:
return e1 in e2
else:
if None in {e1, e2}:
return True
else:
return e1 == e2
Filters = namedtuple('Filters', [
'relevance',
'partOfSpeech',
'length',
'complexity', # currently unavailable
'form',
'isVulgar'
])
filters = filters.get('filters', {})
for key, val in filters.items():
# make all filters in list format, so 1 becomes [1]. This makes
# checking equality between entries and filters easier.
if not isinstance(val, list):
filters[key] = [val]
# We can't change a namedtuple's values after creating it. We have to
# make sure it matches the user's filter value before we set it.
_tempForm = filters.get('form')
if _tempForm: # make sure it's not NoneType first.
for i, _form in enumerate(_tempForm):
if 'informal' in _form.lower():
_tempForm[i] = 'informal'
elif 'common' in _form.lower():
_tempForm[i] = 'common'
else:
# reset form to be None, thus ignoring the improper option
print('Please select `informal` or `common` for `form=` filter.')
print('Defaulting to select both.')
_tempForm = None
break
fs = Filters(
relevance= filters.get('relevance'),
partOfSpeech= filters.get('partOfSpeech', filters.get('pos')),
length= filters.get('length'),
complexity= None, # not currently implemented.
form= _tempForm,
isVulgar= filters.get('isVulgar')
)
if defnNum == 'all':
# examines all definition tabs for a word
startRange, endRange = 0, len(self.data)
else:
# examines only the tab index specified (starting at 0)
startRange, endRange = defnNum, defnNum+1
filtered_data = [] # data we are going to return
for defn in self.data[startRange:endRange]:
# current defn tab is not of the pos we require. continue.
if not compare_entries(defn['partOfSpeech'], fs.partOfSpeech):
filtered_data.append([])
continue
# current defn tab is not of the vulgarity we require. continue.
if not compare_entries(defn['isVulgar'], fs.isVulgar):
filtered_data.append([])
continue
# holds all the relevant entries for this defn.
cur_data = []
for entry in defn.get(mode):
if (
compare_entries(entry.relevance, fs.relevance) and
compare_entries(entry.length, fs.length) and
compare_entries(entry.form, fs.form)
):
cur_data.append(entry.word)
# if we only care about a single definition, just return a 1d list.
if defnNum != 'all':
return cur_data
filtered_data.append(cur_data)
return filtered_data
### FUNCTIONS TO RETURN DATA YOU WANT ###
"""Each of the following functions allow you to filter the output
accordingly: relevance, partOfSpeech, length, complexity, form.
"""
def synonyms(self, defnNum=0, allowEmpty=True, **filters):
"""Return synonyms for specific definitions, filtered to only include
words with specified attribute values.
PLEASE see _filter()'s docstring or the README for more information on
filtering.
Parameters
----------
defnNum : int or 'all', optional
The word definition we are returning data from (index of self.data).
Thus, as it is an index, it must be >= 0. If 'all' is specified,
however, it will filter through all definitions. 0 is the default.
allowEmpty : bool, optional
Filters the output to only include defns (represented as lists) that
are not empty after being filtered. Useful if you are trying to only
see definitions of a certain part of speech. This way, you can
enumerate over the returned values without having to worry if you're
enumerating over an empty value.
Returns
-------
list of list of str OR list of str
If defnNum is set to 'all', it will include data from all
definitions, returning a list of list of str, where each nested list
is a single definition.
If defnNum is set to an integer, it will return a list of str, where
the str's are the filtered words for that single definition.
"""
data = self._filter(mode='syn', defnNum=defnNum, filters=filters)
# the word does not exist. return empty.
if not data:
return []
if allowEmpty:
return data
else:
return [d for d in data if len(d) > 0]
def antonyms(self, defnNum=0, allowEmpty=True, **filters):
"""Return antonyms for specific definitions, filtered to only include
words with specified attribute values.
PLEASE see _filter()'s docstring or the README for more information on
filtering.
Parameters
----------
defnNum : int or 'all', optional
The word definition we are returning data from (index of self.data).
Thus, as it is an index, it must be >= 0. If 'all' is specified,
however, it will filter through all definitions. 0 is the default.
allowEmpty : bool, optional
Filters the output to only include defns (represented as lists) that
are not empty after being filtered. Useful if you are trying to only
see definitions of a certain part of speech. This way, you can
enumerate over the returned values without having to worry if you're
enumerating over an empty value.
Returns
-------
list of list of str OR list of str
If defnNum is set to 'all', it will include data from all
definitions, returning a list of list of str, where each nested list
is a single definition.
If defnNum is set to an integer, it will return a list of str, where
the str's are the filtered words for that single definition.
"""
data = self._filter(mode='ant', defnNum=defnNum, filters=filters)
# the word does not exist. return empty.
if not data:
return []
if allowEmpty:
return data
else:
return [d for d in data if len(d) > 0]
def origin(self):
"""Gets the origin of a word.
Returns
-------
str
It's the paragraph that's on the right side of the page. It talks a
bit about how the modern meaning of the word came to be.
"""
return self.extra['origin']
def examples(self):
"""Gets sentences the word is used in.
Returns
-------
list of str
Each str is a sentence the word is used in.
"""
return self.extra['examples']
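# Editor's sketch (not part of the original module): a minimal usage example of the
# Word class documented above. The search term and the filter values are illustrative
# assumptions, and calling this helper performs a live request to thesaurus.com.
def _example_usage(term='happy'):
    w = Word(term)
    # synonyms of the first definition, restricted to the most relevant entries
    most_relevant = w.synonyms(defnNum=0, relevance=3)
    # antonyms across all definitions, dropping definitions that end up empty
    opposites = w.antonyms(defnNum='all', allowEmpty=False)
    return most_relevant, opposites, w.origin()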
|
Manwholikespie/thesaurus-api
|
thesaurus/thesaurus.py
|
Python
|
mit
| 19,571
|
[
"VisIt"
] |
59d5c47ec6cda16a765e33ee38ec0f1940997f0b45605df305fc75e78800b529
|
# class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkPointSetAlgorithm(SimpleVTKClassModuleBase):
def __init__(self, module_manager):
SimpleVTKClassModuleBase.__init__(
self, module_manager,
vtk.vtkPointSetAlgorithm(), 'Processing.',
('vtkPointSet',), ('vtkPointSet',),
replaceDoc=True,
inputFunctions=None, outputFunctions=None)
|
nagyistoce/devide
|
modules/vtk_basic/vtkPointSetAlgorithm.py
|
Python
|
bsd-3-clause
| 495
|
[
"VTK"
] |
833aa93a2d2e40b8bf6ccc287f41b39bf5961d5cd8a99083d64168beec498f92
|
# -*- coding: utf-8 -*-
"""Tests for gam.AdditiveModel and GAM with Polynomials compared to OLS and GLM
Created on Sat Nov 05 14:16:07 2011
Author: Josef Perktold
License: BSD
Notes
-----
TODO: TestGAMGamma: has test failure (GLM looks good),
adding log-link didn't help
resolved: gamma doesn't fail anymore after tightening the
convergence criterium (rtol=1e-6)
TODO: TestGAMNegativeBinomial: rvs generation doesn't work,
nbinom needs 2 parameters
TODO: TestGAMGaussianLogLink: test failure,
but maybe precision issue, not completely off
but something is wrong, either the testcase or with the link
>>> tt3.__class__
<class '__main__.TestGAMGaussianLogLink'>
>>> tt3.res2.mu_pred.mean()
3.5616368292650766
>>> tt3.res1.mu_pred.mean()
3.6144278964707679
>>> tt3.mu_true.mean()
34.821904835958122
>>>
>>> tt3.y_true.mean()
2.685225067611543
>>> tt3.res1.y_pred.mean()
0.52991541684645616
>>> tt3.res2.y_pred.mean()
0.44626406889363229
one possible change
~~~~~~~~~~~~~~~~~~~
add average, integral based tests, instead of or additional to sup
* for example mean squared error for mu and eta (predict, fittedvalues)
or mean absolute error, what's the scale for this? required precision?
* this will also work for real non-parametric tests
example: Gamma looks good in average bias and average RMSE (RMISE)
>>> tt3 = _estGAMGamma()
>>> np.mean((tt3.res2.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
-0.0051829977497423706
>>> np.mean((tt3.res2.y_pred - tt3.y_true))/tt3.y_true.mean()
0.00015255264651864049
>>> np.mean((tt3.res1.y_pred - tt3.y_true))/tt3.y_true.mean()
0.00015255538823786711
>>> np.mean((tt3.res1.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
-0.0051937668989744494
>>> np.sqrt(np.mean((tt3.res1.mu_pred - tt3.mu_true)**2))/tt3.mu_true.mean()
0.022946118520401692
>>> np.sqrt(np.mean((tt3.res2.mu_pred - tt3.mu_true)**2))/tt3.mu_true.mean()
0.022953913332599746
>>> maxabs = lambda x: np.max(np.abs(x))
>>> maxabs((tt3.res1.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
0.079540546242707733
>>> maxabs((tt3.res2.mu_pred - tt3.mu_true))/tt3.mu_true.mean()
0.079578857986784574
>>> maxabs((tt3.res2.y_pred - tt3.y_true))/tt3.y_true.mean()
0.016282852522951426
>>> maxabs((tt3.res1.y_pred - tt3.y_true))/tt3.y_true.mean()
0.016288391235613865
"""
from statsmodels.compat.python import get_class, lrange
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from scipy import stats
from statsmodels.sandbox.gam import AdditiveModel
from statsmodels.sandbox.gam import Model as GAM #?
from statsmodels.genmod.families import family, links
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.regression.linear_model import OLS
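# Editor's sketch (not part of the original test module): the module docstring above
# proposes average/integral based comparisons (relative bias, relative RMSE) in addition
# to the elementwise assert_almost_equal checks. A minimal helper along those lines is
# shown below; the default tolerance is an illustrative assumption, not a statsmodels value.
def assert_rel_rmse(predicted, true, rtol=0.05):
    """Assert that the RMSE of `predicted` against `true`, scaled by the mean of
    `true`, stays below `rtol`."""
    predicted = np.asarray(predicted)
    true = np.asarray(true)
    rel_rmse = np.sqrt(np.mean((predicted - true) ** 2)) / np.mean(true)
    assert rel_rmse < rtol, 'relative RMSE %g exceeds tolerance %g' % (rel_rmse, rtol)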
class Dummy(object):
pass
class CheckAM(object):
def test_predict(self):
assert_almost_equal(self.res1.y_pred,
self.res2.y_pred, decimal=2)
assert_almost_equal(self.res1.y_predshort,
self.res2.y_pred[:10], decimal=2)
def _est_fitted(self):
#check definition of fitted in GLM: eta or mu
assert_almost_equal(self.res1.y_pred,
self.res2.fittedvalues, decimal=2)
assert_almost_equal(self.res1.y_predshort,
self.res2.fittedvalues[:10], decimal=2)
def test_params(self):
#note: only testing slope coefficients
#constant is far off in example 4 versus 2
assert_almost_equal(self.res1.params[1:],
self.res2.params[1:], decimal=2)
#constant
assert_almost_equal(self.res1.params[1],
self.res2.params[1], decimal=2)
def _est_df(self):
#not used yet, copied from PolySmoother tests
assert_equal(self.res_ps.df_model(), self.res2.df_model)
assert_equal(self.res_ps.df_fit(), self.res2.df_model) #alias
assert_equal(self.res_ps.df_resid(), self.res2.df_resid)
class CheckGAM(CheckAM):
def test_mu(self):
#problem with scale for precision
assert_almost_equal(self.res1.mu_pred,
self.res2.mu_pred, decimal=0)
# assert_almost_equal(self.res1.y_predshort,
# self.res2.y_pred[:10], decimal=2)
class BaseAM(object):
def __init__(self):
#DGP: simple polynomial
order = 3
nobs = 200
lb, ub = -3.5, 3
x1 = np.linspace(lb, ub, nobs)
x2 = np.sin(2*x1)
x = np.column_stack((x1/x1.max()*1, 1.*x2))
exog = (x[:,:,None]**np.arange(order+1)[None, None, :]).reshape(nobs, -1)
idx = lrange((order+1)*2)
del idx[order+1]
exog_reduced = exog[:,idx] #remove duplicate constant
y_true = exog.sum(1) #/ 4.
#z = y_true #alias check
#d = x
self.nobs = nobs
self.y_true, self.x, self.exog = y_true, x, exog_reduced
class TestAdditiveModel(BaseAM, CheckAM):
def __init__(self):
super(self.__class__, self).__init__() #initialize DGP
nobs = self.nobs
y_true, x, exog = self.y_true, self.x, self.exog
np.random.seed(8765993)
sigma_noise = 0.1
y = y_true + sigma_noise * np.random.randn(nobs)
m = AdditiveModel(x)
m.fit(y)
res_gam = m.results #TODO: currently attached to class
res_ols = OLS(y, exog).fit()
#Note: there still are some naming inconsistencies
self.res1 = res1 = Dummy() #for gam model
#res2 = Dummy() #for benchmark
self.res2 = res2 = res_ols #reuse existing ols results, will add additional
res1.y_pred = res_gam.predict(x)
res2.y_pred = res_ols.model.predict(res_ols.params, exog)
res1.y_predshort = res_gam.predict(x[:10])
slopes = [i for ss in m.smoothers for i in ss.params[1:]]
const = res_gam.alpha + sum([ss.params[1] for ss in m.smoothers])
#print const, slopes
res1.params = np.array([const] + slopes)
class BaseGAM(BaseAM, CheckGAM):
def init(self):
nobs = self.nobs
y_true, x, exog = self.y_true, self.x, self.exog
if not hasattr(self, 'scale'):
scale = 1
else:
scale = self.scale
f = self.family
self.mu_true = mu_true = f.link.inverse(y_true)
np.random.seed(8765993)
#y_obs = np.asarray([stats.poisson.rvs(p) for p in mu], float)
if issubclass(get_class(self.rvs), stats.rv_discrete):
# Discrete distributions don't take `scale`.
y_obs = self.rvs(mu_true, size=nobs)
else:
y_obs = self.rvs(mu_true, scale=scale, size=nobs)
m = GAM(y_obs, x, family=f) #TODO: y_obs is twice __init__ and fit
m.fit(y_obs, maxiter=100)
res_gam = m.results
self.res_gam = res_gam #attached for debugging
self.mod_gam = m #attached for debugging
res_glm = GLM(y_obs, exog, family=f).fit()
#Note: there still are some naming inconsistencies
self.res1 = res1 = Dummy() #for gam model
#res2 = Dummy() #for benchmark
self.res2 = res2 = res_glm #reuse existing glm results, will add additional
#eta in GLM terminology
res2.y_pred = res_glm.model.predict(res_glm.params, exog, linear=True)
res1.y_pred = res_gam.predict(x)
res1.y_predshort = res_gam.predict(x[:10]) #, linear=True)
#mu
res2.mu_pred = res_glm.model.predict(res_glm.params, exog, linear=False)
res1.mu_pred = res_gam.mu
#parameters
slopes = [i for ss in m.smoothers for i in ss.params[1:]]
const = res_gam.alpha + sum([ss.params[1] for ss in m.smoothers])
res1.params = np.array([const] + slopes)
class TestGAMPoisson(BaseGAM):
def __init__(self):
super(self.__class__, self).__init__() #initialize DGP
self.family = family.Poisson()
self.rvs = stats.poisson.rvs
self.init()
class TestGAMBinomial(BaseGAM):
def __init__(self):
super(self.__class__, self).__init__() #initialize DGP
self.family = family.Binomial()
self.rvs = stats.bernoulli.rvs
self.init()
class _estGAMGaussianLogLink(BaseGAM):
#test failure, but maybe precision issue, not far off
#>>> np.mean(np.abs(tt.res2.mu_pred - tt.mu_true))
#0.80409736263199649
#>>> np.mean(np.abs(tt.res2.mu_pred - tt.mu_true))/tt.mu_true.mean()
#0.023258245077813208
#>>> np.mean((tt.res2.mu_pred - tt.mu_true)**2)/tt.mu_true.mean()
#0.022989403735692578
def __init__(self):
super(self.__class__, self).__init__() #initialize DGP
self.family = family.Gaussian(links.log)
self.rvs = stats.norm.rvs
self.scale = 5
self.init()
class TestGAMGamma(BaseGAM):
def __init__(self):
super(self.__class__, self).__init__() #initialize DGP
self.family = family.Gamma(links.log)
self.rvs = stats.gamma.rvs
self.init()
class _estGAMNegativeBinomial(BaseGAM):
#rvs generation doesn't work, nbinom needs 2 parameters
def __init__(self):
super(self.__class__, self).__init__() #initialize DGP
self.family = family.NegativeBinomial()
self.rvs = stats.nbinom.rvs
self.init()
if __name__ == '__main__':
t1 = TestAdditiveModel()
t1.test_predict()
t1.test_params()
for tt in [TestGAMPoisson, TestGAMBinomial, TestGAMGamma,
_estGAMGaussianLogLink]: #, TestGAMNegativeBinomial]:
tt = tt()
tt.test_predict()
tt.test_params()
        tt.test_mu()
|
hlin117/statsmodels
|
statsmodels/sandbox/tests/test_gam.py
|
Python
|
bsd-3-clause
| 9,820
|
[
"Gaussian"
] |
755a019fa7700353a5f367cd3404dc762b17773c7ddecc3fa778bc924ccf0ab8
|
# The MIT License (MIT)
# Copyright (c) 2016, 2017 by the ESA CCI Toolbox development team and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Description
===========
.. _xarray: http://xarray.pydata.org/en/stable/
.. _Dask: http://dask.pydata.org/en/latest/
.. _ESRI Shapefile: https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf
.. _netCDF: http://www.unidata.ucar.edu/software/netcdf/docs/
.. _Common Data Model: http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/CDM
.. _Fiona: http://toblerity.org/fiona/
.. _CCI Toolbox URD: https://www.dropbox.com/s/0bhp6uwwk6omj8k/CCITBX-URD-v1.0Rev1.pdf?dl=0
This module provides classes and interfaces used to harmonise the access to and operations on various
types of climate datasets, for example gridded data stored in `netCDF`_ files and vector data originating from
`ESRI Shapefile`_ files.
The goal of Cate is to reuse existing, well-known APIs for a given data type to a maximum extent
instead of creating a complex new API. Therefore Cate uses the xarray_ N-D Gridded Datasets Python API,
which nicely represents netCDF, HDF-5 and OPeNDAP data types, i.e. Unidata's `Common Data Model`_.
For the ESRI Shapefile representation we target Fiona_, which reads and writes spatial data files.
The use of xarray_ allows the CCI Toolbox to access and process very large datasets without the need to load them
entirely into memory. This feature is enabled by the internal use of the Dask_ library.
Technical Requirements
======================
**Common Data Model**
:Description: A common data model is required that abstracts from underlying (climate) data formats.
:URD References:
* CCIT-UR-DM0001: a) access, b) ingest, c) display, d) process different kinds and sizes of data
* CCIT-UR-DM0003: multi-dimensional data
* CCIT-UR-DM0005: access all ECV data products and metadata via standard user-community interfaces,
protocols, and tools
* CCIT-UR-DM0006: access to and ingestion of ESA CCI datasets
* CCIT-UR-DM0011: access to and ingestion of non-CCI data
* CCIT-UR-DM0012: handle different input file formats
----
**Common Set of (Climate) Operations**
:Description: Instances of the common data model are the input for various operations used for climate data
visualisation, processing, and analysis. Depending on the underlying data format / schema, a given
    operation may not be applicable. The API shall provide the means to check in advance whether a given operation
is applicable to a given common data model instance.
:URD-References:
* CCIT-UR-LM0009 to CCIT-UR-LM0018: Geometric Adjustments/Co-registration.
* CCIT-UR-LM0019 to CCIT-UR-LM0024: Non-geometric Adjustments.
* CCIT-UR-LM0025 to CCIT-UR-LM0034: Filtering, Extractions, Definitions, Selections.
* CCIT-UR-LM0035 to CCIT-UR-LM0043: Statistics and Calculations.
* CCIT-UR-LM0044: GIS Tools.
* CCIT-UR-LM0045 to CCIT-UR-LM0050: Evaluation and Quality Control.
----
**Handle large Data Sets**
:Description: A single variable in ECV dataset may contain tens of gigabytes of gridded data.
The common data model must be able to "handle" data sizes by different means. For example, lazy loading
of data into memory combined with a programming model that allows for partial processing of data subsets
within an operation.
:URD References:
* CCIT-UR-DM0002: handle large datasets
* CCIT-UR-DM0003: multi-dimensional data
* CCIT-UR-DM0004: multiple inputs
----
Verification
============
The module's unit-tests are located
* `test/ops/test_resample_2d.py <https://github.com/CCI-Tools/cate/blob/master/test/ops/test_resample_2d.py>`_.
* `test/ops/test_downsample_2d.py <https://github.com/CCI-Tools/cate/blob/master/test/ops/test_downsample_2d.py>`_.
* `test/ops/test_upsample_2d.py <https://github.com/CCI-Tools/cate/blob/master/test/ops/test_upsample_2d.py>`_.
* `test/ops/test_timeseries.py <https://github.com/CCI-Tools/cate/blob/master/test/ops/test_timeseries.py>`_.
and may be executed using ``$ py.test test/ops/test_<MODULE>.py --cov=cate/ops/<MODULE>.py`` for extra code coverage
information.
Components
==========
"""
import warnings
from collections import OrderedDict
from typing import List, Optional, Union
import xarray as xr
from .opimpl import get_lat_dim_name_impl, get_lon_dim_name_impl
from ..util.im import GeoExtent, TilingScheme
from ..util.misc import object_to_qualified_name, qualified_name_to_object
__author__ = "Norman Fomferra (Brockmann Consult GmbH)," \
"Janis Gailis (S[&]T Norway)"
class Schema:
"""
.. _Schema for NcML: http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/ncml/AnnotatedSchema4.html
.. _netCDF Java Schema: https://www.unidata.ucar.edu/software/netcdf/java/docs/ucar/netcdf/Schema.html
.. _GeoJSON: http://geojson.org/geojson-spec.html
.. _Shapefile: https://en.wikipedia.org/wiki/Shapefile
Simple data structure description that focuses on the (geophysical) variables provided by some dataset. It is
    mainly modelled after the netCDF CDM (see also `Schema for NcML`_, `netCDF Java Schema`_).
However, this schema intentionally lacks the explicit definition of *groups*, as defined by the netCDF CDM.
Groups are no more than a physical container of variables which can be easily represented as parent
path components of names of variables, dimensions, and attributes. E.g. if a variable is named ``data/ndvi`` then
it is in group ``data``. If an attribute is named ``data/ndvi/originator`` then it is an attribute of variable
``ndvi`` which is in the group ``data``.
This schema allows to represent both raster / gridded data types and GIS data. Raster / gridded data may originate
from netCDF, HDF, GeoTIFF, or others. GIS-type vector data types may originate
from a Shapefile_ or GeoJSON_ file. It comprises only three basic data structures:
    * ``Variable`` represents the primary data provided by a dataset, usually geophysical climate measurements or computed values.
* ``Dimension`` provides a description of a dimension used by one or more N-D variables.
* ``Attribute`` provides meta-information to variables and any groups that occur as path components of an
attribute name.
Important note: The name ``Attribute`` used here must not be confused with the "attribute" of a "(simple)
feature type" as used within the OGC GML/GIS terminology.
The CCI Toolbox maps attributes of OGC features types to *Variables* to match the terminology used in
this schema.
:param dimensions: dimensions in this schema
:param variables: variables in this schema
:param attributes: attributes in this schema
"""
def __init__(self,
name: str,
lon_name: str = 'lon',
lat_name: str = 'lat',
time_name: str = 'time',
dimensions: List['Schema.Dimension'] = None,
variables: List['Schema.Variable'] = None,
attributes: List['Schema.Attribute'] = None):
if not name:
raise ValueError('name must be given')
self.name = name
self.lon_name = lon_name
self.lat_name = lat_name
self.time_name = time_name
self.dimensions = list(dimensions) if dimensions else []
self.variables = list(variables) if variables else []
self.attributes = list(attributes) if attributes else []
def dimension(self, index_or_name):
try:
return self.dimensions[index_or_name]
except (IndexError, TypeError):
for dimension in self.dimensions:
if dimension.name == index_or_name:
return dimension
return None
@classmethod
def from_json_dict(cls, json_dict) -> 'Schema':
name = json_dict.get('name', None)
lon_name = json_dict.get('lon_name', 'lon')
lat_name = json_dict.get('lat_name', 'lat')
time_name = json_dict.get('time_name', 'time')
json_dimensions = json_dict.get('dimensions', [])
json_variables = json_dict.get('variables', [])
json_attributes = json_dict.get('attributes', [])
dimensions = []
for json_dimensions in json_dimensions:
dimensions.append(Schema.Dimension.from_json_dict(json_dimensions))
variables = []
for json_variable in json_variables:
variables.append(Schema.Variable.from_json_dict(json_variable))
attributes = []
for json_attribute in json_attributes:
attributes.append(Schema.Attribute.from_json_dict(json_attribute))
return Schema(name, lon_name, lat_name, time_name,
dimensions=dimensions,
variables=variables,
attributes=attributes)
def to_json_dict(self) -> dict:
json_dict = OrderedDict()
json_dict['name'] = self.name
json_dict['lon_name'] = self.lon_name
json_dict['lat_name'] = self.lat_name
json_dict['time_name'] = self.time_name
json_dict['variables'] = [variable.to_json_dict() for variable in self.variables]
json_dict['dimensions'] = [dimension.to_json_dict() for dimension in self.dimensions]
json_dict['attributes'] = [attribute.to_json_dict() for attribute in self.attributes]
return json_dict
class Variable:
"""
Represents a (geophysical) variable of a specified data type and array shape.
"""
def __init__(self,
name: str,
data_type: type,
dimension_names: List[str] = None,
attributes: List['Schema.Attribute'] = None):
self.name = name
self.data_type = data_type
self.dimension_names = list(dimension_names) if dimension_names else []
self.attributes = list(attributes) if attributes else []
@property
def rank(self):
return len(self.dimension_names)
def dimension(self, schema: 'Schema', index: int):
name = self.dimension_names[index]
return schema.dimension(name)
@classmethod
def from_json_dict(cls, json_dict) -> 'Schema.Variable':
name = json_dict.get('name', None)
data_type = qualified_name_to_object(json_dict.get('data_type', None))
dimension_names = json_dict.get('dimension_names', [])
json_attributes = json_dict.get('attributes', [])
attributes = []
for json_attribute in json_attributes:
attributes.append(Schema.Attribute.from_json_dict(json_attribute))
return Schema.Variable(name,
data_type,
dimension_names=dimension_names,
attributes=attributes)
def to_json_dict(self) -> dict:
json_dict = OrderedDict()
json_dict['name'] = self.name
json_dict['data_type'] = object_to_qualified_name(self.data_type)
json_dict['dimension_names'] = self.dimension_names
json_dict['attributes'] = [attribute.to_json_dict() for attribute in self.attributes]
return json_dict
class Dimension:
"""
Provides a description of a dimension used by one or more N-D variables.
"""
def __init__(self, name: str,
length=None,
attributes: List['Schema.Attribute'] = None):
self.name = name
self.attributes = list(attributes) if attributes else []
if length is not None:
self.attributes.append(Schema.Attribute('length', int, length))
@classmethod
def from_json_dict(cls, json_dict) -> 'Schema.Dimension':
name = json_dict.get('name', None)
json_attributes = json_dict.get('attributes', [])
attributes = []
for json_attribute in json_attributes:
attributes.append(Schema.Attribute.from_json_dict(json_attribute))
return Schema.Dimension(name, attributes=attributes)
def to_json_dict(self) -> dict:
json_dict = OrderedDict()
json_dict['name'] = self.name
json_dict['attributes'] = [attribute.to_json_dict() for attribute in self.attributes]
return json_dict
class Attribute:
"""
An attribute is a name-value pair of a specified type.
The main purpose of attributes is to attach meta-information to datasets and variables.
Values are usually scalars and may remain constant over
multiple datasets that use the same schema (e.g. missing value, coordinate reference system, originator).
"""
def __init__(self,
name: str,
data_type: type = str,
value: object = None):
self.name = name
self.data_type = data_type
self.value = value
@classmethod
def from_json_dict(cls, json_dict) -> 'Schema.Attribute':
name = json_dict.get('name', None)
data_type = qualified_name_to_object(json_dict.get('data_type', None))
value = json_dict.get('value', None)
return Schema.Attribute(name, data_type, value=value)
def to_json_dict(self) -> dict:
json_dict = OrderedDict()
json_dict['name'] = self.name
json_dict['data_type'] = object_to_qualified_name(self.data_type)
json_dict['value'] = self.value
return json_dict
def get_lon_dim_name(ds: Union[xr.Dataset, xr.DataArray]) -> Optional[str]:
"""
Get the name of the longitude dimension.
:param ds: An xarray Dataset
:return: the name or None
"""
return get_lon_dim_name_impl(ds)
def get_lat_dim_name(ds: Union[xr.Dataset, xr.DataArray]) -> Optional[str]:
"""
Get the name of the latitude dimension.
:param ds: An xarray Dataset
:return: the name or None
"""
return get_lat_dim_name_impl(ds)
def get_tiling_scheme(var: xr.DataArray) -> Optional[TilingScheme]:
"""
Compute a tiling scheme for the given variable *var*.
:param var: A variable of an xarray dataset.
:return: a new TilingScheme object or None if *var* cannot be represented as a spatial image
"""
lat_dim_name = get_lat_dim_name(var)
lon_dim_name = get_lon_dim_name(var)
if not lat_dim_name or not lon_dim_name:
return None
if lat_dim_name not in var.coords or lon_dim_name not in var.coords:
return None
width, height = var.shape[-1], var.shape[-2]
lats = var.coords[lat_dim_name]
lons = var.coords[lon_dim_name]
try:
geo_extent = GeoExtent.from_coord_arrays(lons, lats)
except ValueError as e:
warnings.warn(f'failed to derive geo-extent for tiling scheme: {e}')
# Create a default geo-extent which is probably wrong, but at least we see something
geo_extent = GeoExtent()
try:
return TilingScheme.create(width, height, 360, 360, geo_extent)
except ValueError:
return TilingScheme(1, 1, 1, width, height, geo_extent)
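# Editor's sketch (not part of the original module): a minimal example of building a
# Schema as described in its docstring above and round-tripping it through the JSON
# representation. The variable, dimension and attribute names are illustrative
# assumptions, not CCI dataset conventions.
def _example_schema_roundtrip():
    schema = Schema(
        'example',
        dimensions=[Schema.Dimension('lat', length=180),
                    Schema.Dimension('lon', length=360)],
        variables=[Schema.Variable('sst', float, dimension_names=['lat', 'lon'])],
        attributes=[Schema.Attribute('title', str, 'Sea surface temperature')])
    json_dict = schema.to_json_dict()
    # from_json_dict() restores an equivalent Schema instance from the dictionary
    return Schema.from_json_dict(json_dict)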
|
CCI-Tools/cate-core
|
cate/core/cdm.py
|
Python
|
mit
| 16,383
|
[
"NetCDF"
] |
40198511574ff1f232d5af53f7ecdd8c3ab716ecb3f3c15418e2bce01dd9ac0a
|
"""
@name: Modules/Computer/Communication/communication.py
@author: D. Brian Kimmel
@contact: D.BrianKimmel@gmail.com
@copyright: (c) 2017-2019 by D. Brian Kimmel
@note: Created on Jan 9, 2017
@license: MIT License
@summary:
"""
__updated__ = '2019-10-11'
# Import system type stuff
# Import PyMh files and modules.
from Modules.Core import logging_pyh as Logger
LOG = Logger.getLogger('PyHouse.Communication ')
class lightingUtilityComm:
def read_xml(self, p_pyhouse_obj):
"""Read all the information.
"""
self.m_count = 0
l_dict = {}
try:
_l_xml = p_pyhouse_obj.Xml.XmlRoot.find('ComputerDivision').find('CommunicationSection')
except AttributeError as e_err:
LOG.error('ERROR in read_xml() - {}'.format(e_err))
return l_dict
class Api:
def __init__(self, p_pyhouse_obj):
self.m_pyhouse_obj = p_pyhouse_obj
def LoadConfig(self):
"""
"""
LOG.info('Loaded Communication Config.')
def Start(self):
pass
def Stop(self):
pass
def SaveConfig(self):
LOG.info("Saved Communication Config.")
# ## END DBK
|
DBrianKimmel/PyHouse
|
Project/src/Modules/Computer/Communication/communication.py
|
Python
|
mit
| 1,191
|
[
"Brian"
] |
855564abde18a72aad5bb6bceab810af0159e05620d76cc0dedf3442a8ee1cee
|
#!/usr/bin/env python
# encoding: utf-8
from logs import *
logger = logging.getLogger(__name__)
from common import *
from game_art import Art
class __CentralLoggers(object):
"""dumping ground for messy loggers
from central"""
def __init__(self):
pass
def logger_attr_set(self,attr,val):
"""Logs attribute and value cutting off at 10char
used in Central().newgame()"""
# logging
str_val = str(val) # for logging
if len(str_val) > 9:
str_val=str_val[:10]
self.logger.debug("Setting attribute {} to {} ... (shortened to 10 char)".format(attr, str_val))
else:
str_val
pass
class ___UserLoggers(object):
"""dumping ground for messy loggers
from User and Computer"""
def __init__(self):
pass
def logger_affords_card(self, num, name, cost, can_afford=True,wishlist=True):
""" Displays if the actor can afford the card
This is used both in User().turn() and Computer.turn()"""
wishlist = " Added to wish list" if wishlist else ""
afford = "can afford" if can_afford else "can not afford"
self.logger.debug("{5} {4} {0}x{1} at cost:{2}.{3}".format(num, name, cost, wishlist, can_afford, self.name))
pass
def logger_buy_card(self,card, self_money, change_in_money):
""" Displays the actor buying the card
This is used both in User().turn() and Computer.turn()"""
self.logger.debug("{3} bought 1x{0}, money:{1}+{2}".format(card.name, self_money, change_in_money,self.name))
pass
def logger_compare_cards(self, val_name, is_isnt, pot_val, des_val):
""" Displays the comparison made by the Computer
when picking cards for its wishlist
Only used in Computer.turn()"""
self.logger.debug("Potential card ({2}:{0}) {3} higher {2} than desired card ({2}:{1})".format(
pot_val,des_val,val_name,is_isnt))
pass
def logger_new_desired(self):
""" Displays the new most desired card by the computer
when iterating through its wishlist
Only used in Computer.turn()"""
self.logger.debug("New desired card index: {}".format(self.potential_card_index))
pass
# separates classes in my editor
@wrap_all(log_me)
class Central(CommonActions,__CentralLoggers):
"""The Central Deck Class"""
def __init__(self, parent, hand_size, deck_settings, name, supplements):
"""initial settings for the central cards"""
self.parent = parent
self.art = Art() # create art for game
# store initial state
self.init = {attr:val for attr,val in locals().iteritems() if attr != 'self'}
# logging
get_logger(self)
self.player_logger = self.logger.game
self.hand_size = hand_size
self.name = name
self.deck_settings = deck_settings
self.supplement_settings = supplements
# create newgame paramters
self.newgame()
def newgame(self):
"""Initiates a new game by refreshing saved config parameters"""
self.active = []
# revert to initial state stored in self.init
for attr, val in self.init.iteritems():
setattr(self, attr, val)
# this produces a log to the screen
self.logger_attr_set(attr, val)
# create new decks
self.deck = self.deck_creator(self.deck_settings)
self.supplements = self.deck_creator(self.supplement_settings)
# shuffle decks
random.shuffle(self.deck)
pass
def deck_to_active(self):
""" moves cards from one item to another"""
for i in xrange(0, self.hand_size):
if len(self.deck) == 0:
self.logger.debug("Deck length is also zero!")
self.logger.debug("Exiting the deck_to_active routine as no more cards.")
return
card = self.deck.pop()
self.active.append(card)
self.logger.debug('iteration #{}: Moving {} from deck to active'.format(i, card.name))
pass
def print_supplements(self, index=False, logger=None):
"""Display supplements"""
title = "Supplements (remaining: {})".format(len(self.supplements))
title = self.art.make_title(title)
# make the title of the supplements
if logger:
logger(title)
else:
self.player_logger(title)
# print the supplements
if len(self.supplements) == 0:
self.logger.debug("There are no supplements!")
self.logger.game(self.art.index_buffer+ \
"Nothing interesting to see here...")
else:
self.logger.debug(\
"There are {} supplements remaining".format(len(self.supplements)))
supplement = str(self.supplements[0])
num_str = "[S] " if index else self.art.index_buffer
self.logger.game(num_str + "{}".format(supplement))
# prints the underline
self.player_logger(self.art.underline)
pass
def display_all_active(self):
"""displays both active cards and the supplements"""
self.logger.game("")
self.print_active_cards(title="Central Buyable Cards")
self.print_supplements()
pass
# separates classes in my editor
@wrap_all(log_me)
class User(CommonActions, CommonUserActions, ___UserLoggers):
"""The User Class"""
def __init__(self, parent, hand_size, deck_settings, name, health):
"""initial settings for the User"""
self.parent = parent
self.art = Art() # create art for game
# store initial state
self.init = {attr:val for attr,val in locals().iteritems() if attr != 'self'}
# logging
get_logger(self)
self.player_logger = self.logger.user
self.hand_size = hand_size
self.name = name
self.health = health
self.deck_settings = deck_settings
# create newgame paramters
self.newgame()
def print_hand(self):
"""displays the indexed user hand"""
# Display User hand
self.player_logger("")
self.player_logger(self.art.make_title("Your Hand"))
self._print_cards(self.hand, index=True)
self.player_logger(self.art.underline)
pass
def turn(self):
"""Contains the User Actions UI"""
# iterators to count self.money
# and attack in players' hands
self.reset_vals() # resetes money / attack
# a first message is shown as an example
self.clear_delayed_messages()
self.add_delayed_message("Play cards to build Money and Attack.",self.logger.game)
self.add_delayed_message("Both Attack and Money will return to 0 at the end of your turn.", self.logger.game)
# User's Turn
while not self.parent.end(display=False):
self.parent.clear_term()
# Display health state
self.parent.display_health_status()
# display active deck and supplements
self.parent.central.display_all_active()
self.logger.game("")
self.show_updated_user_state()
self.print_delayed_messages()
# In-game actions UI
self.player_logger("")
self.player_logger(self.art.choose_action)
self.player_logger(self.art.card_options)
self.player_logger(self.art.game_options)
self.player_logger(self.art.underline)
# get user input
iuser_action = raw_input().upper()
self.logger.debug("User Input: {}".format(iuser_action))
if iuser_action == 'P': # Play all cards
self.logger.debug("Play all cards action selected (input: {}) ...".format(iuser_action))
if(len(self.hand)>0): # Are there cards in the hand
self.logger.debug("There are cards ({}) in the Users hand".format(len(self.hand)))
# transfer all cards from hand to active
# add values in hand to current totals
self.play_all_cards()
else: # there are no cards in the user's hand
self.add_delayed_message(
"There are no cards currently in your hand to play!", self.logger.game)
self.logger.debug("There are cards ({}) in the Users hand".format(len(self.hand)))
elif iuser_action.isdigit(): # Play a specific card
self.logger.debug("Play a single card action selected (input: {}) ...".format(iuser_action))
# check the card number is valid
if int(iuser_action) in xrange(0, len(self.hand)):
self.logger.debug("{} is a valid card number.".format(int(iuser_action)))
self.play_a_card(card_number=iuser_action)
elif len(self.hand) == 0:
self.logger.game("There are no cards currently in your hand to play!")
else:
self.logger.game("'{}' is not a valid option. Please try again.".format(iuser_action))
elif (iuser_action == 'B'): # Buy cards
self.logger.debug("Buy Cards action selected (input: {}) ...".format(iuser_action))
self.card_shop() # go to the shop to buy cards
elif iuser_action == 'A': # Attack
self.logger.debug("Attack action selected (input: {}) ...".format(iuser_action))
# output to screen from the attack
# get the name spacing correct
name_pad = self.parent.max_player_name_len
self.add_delayed_message(\
"{} Attacking!".format(self.name.ljust(name_pad)))
if self.attack != 0:
self.add_delayed_message(\
"{} Suffered a battering of -{} Health".format(
self.parent.computer.name.ljust(name_pad),self.attack),
self.parent.computer.player_logger)
else:
self.add_delayed_message(\
"{} Suffered -{} Health, whilst you make a rude gesture.".format(self.parent.computer.name.ljust(name_pad), self.attack),
self.parent.computer.player_logger)
self.add_delayed_message("", self.logger.game)
self.add_delayed_message(\
"Hint: Playing cards generates attack and money.", self.logger.game)
self.add_delayed_message(\
" Visit the shop to increase deck strength.", self.logger.game)
# the actual attack :)
self.attack_player(self.parent.computer)
elif iuser_action == 'E': # Ends turn
self.logger.debug("End Turn action selected (input: {}) ...".format(iuser_action))
break
elif iuser_action == 'Q': # Quit Game
self.logger.debug("User wants to quite the game")
self.parent.hostile_exit()
else:
self.logger.debug(
"No action matched to input (input: {}) ...".format(iuser_action))
self.add_delayed_message(
"'{}' is not a valid option. Please try again.".format(iuser_action),
self.logger.game)
# ends turn and prints debug message
self.end_turn()
pass
def card_shop(self):
"""contains the shop for buying cards"""
# clear any stored messages
self.clear_delayed_messages(in_shop=True)
# Check player has self.money available
while self.money > 0: # no warning of no self.money
self.parent.clear_term() # clear the screen
# welcome to the shop
self.logger.game(self.art.shop)
self.logger.game(self.art.underline)
self.logger.game("Cards bought here are added to your discard pile.")
self.logger.game("You will have a random chance to pick them at each new turn.")
self.logger.game("")
self.logger.debug("Starting new purchase loop with money: {}".format(self.money))
# Display central.central cards state
self.parent.central.print_active_cards("Central Buyable Cards", index=True)
self.parent.central.print_supplements(index=True)
self.logger.game("")
self.player_logger("Current money: {}".format(self.money))
# display delayed messages
self.print_delayed_messages(in_shop=True)
# User chooses a card to purchase
self.player_logger("")
self.player_logger(self.art.choose_action)
self.player_logger(self.art.shop_options)
self.player_logger(self.art.underline)
ibuy_input = raw_input().upper()
self.logger.debug("User Input: {}".format(ibuy_input))
            if ibuy_input.isdigit() or ibuy_input == 'S': # user attempts to purchase a card
self.purchase_cards(ibuy_input)
elif ibuy_input == 'E': # User ends shopping spree
self.logger.debug("End buying action selected (input: {}) ...".format(ibuy_input))
break
elif ibuy_input == 'Q': # Quit Game
self.logger.debug("User wants to quit the game")
self.parent.hostile_exit()
else: # cycle the shopping loop
self.logger.debug("No action matched to input (input: {}) ...".format(ibuy_input))
self.add_delayed_message(
"'{}' is not a valid option. Please try again.".format(ibuy_input),
self.logger.game, in_shop=True)
self.logger.debug("Exiting the card shop")
self.exit_card_shop()
pass
def purchase_cards(self, ibuy_input):
"""User purchases cards"""
# Evaluate choice
if ibuy_input == 'S': # Buy a supplement
self.logger.debug("Buy supplement action selected (input: {}) ...".format(ibuy_input))
self.buy_supplement() # buys a supplement subject to conditions - see function
elif ibuy_input.isdigit(): # Buy a card
self.logger.debug("Buy card {0} action selected (input: {0}) ...".format(ibuy_input))
if int(ibuy_input) in xrange(0,len(self.parent.central.active)): # If card exists
self.logger.debug("{} is a valid card number.".format(int(ibuy_input)))
self.buy_card_by_index(ibuy_input)
else:
self.logger.debug("{} is not valid card number for card for range:0-{}".format(int(ibuy_input),len(self.parent.central.active)))
self.add_delayed_message("Enter a valid index number", self.logger.game, in_shop=True)
pass
def buy_card_by_index(self, ibuy_input):
"""buys a particular card by index
it is assumed that an evaluation has already been made to assess
that the index is valid
"""
# Buy if User has enough self.money
# Move directly to discard pile
purchase_card = self.parent.central.active[int(ibuy_input)]
if self.money >= purchase_card.cost:
self.logger_affords_card(1, purchase_card.name, purchase_card.cost, can_afford=True,wishlist=False)
card = self.parent.central.active.pop(int(ibuy_input))
self.discard.append(card)
new_money = - card.cost
self.logger_buy_card(card, self.money, new_money)
self.money += new_money
# Refill active from self.parent.central.central deck
# if there are cards in self.parent.central.central
self.logger.debug("Attempting to refill card central active deck from central deck...")
if len(self.parent.central.deck) > 0:
self.logger.debug("{} cards in central deck".format(len(self.parent.central.deck)))
card = self.parent.central.deck.pop()
self.parent.central.active.append(card)
self.logger.debug("Moved 1x{} from {} to {}".format(card.name, "central deck", "central active deck"))
else:
# If no cards in self.parent.central.central deck,
# reduce activesize by 1
self.logger.debug("No cards in central deck to refill central active deck.")
self.logger.debug("central hand_size:{}-1".format(self.parent.central.hand_size))
self.parent.central.hand_size -= 1
self.add_delayed_message("Card bought", in_shop=True)
else:
self.logger_affords_card(1, purchase_card.name, purchase_card.cost, can_afford=False,wishlist=False)
self.add_delayed_message(
"Insufficient money to buy. Current money: {}".format(self.money),
in_shop=True)
pass
def buy_supplement(self):
"""buys a supplement from the parent.central"""
if len(self.parent.central.supplements) > 0: # If supplements exist
self.logger.debug("Supplements Detected by {}".format(self.name))
purchase_card = self.parent.central.supplements[0]
# Buy if player has enough self.money
# Move to player's discard pile
if self.money >= purchase_card.cost:
self.logger_affords_card(1, purchase_card.name, purchase_card.cost, can_afford=True,wishlist=False)
card = self.parent.central.supplements.pop()
self.discard.append(card)
new_money = - card.cost
self.logger_buy_card(card, self.money, new_money)
self.money += new_money
self.add_delayed_message("Supplement Bought.", in_shop=True)
else:
self.logger_affords_card(1, purchase_card.name, purchase_card.cost, can_afford=False,wishlist=False)
self.add_delayed_message(
"Insufficient money to buy. Current money: {}".format(self.money),
in_shop=True)
else:
self.logger.debug("No Supplements available")
self.add_delayed_message("No Supplements Left!", self.logger.game, in_shop=True)
pass
def show_updated_user_state(self):
"""Shows the updated / current user state"""
self.print_active_cards() # Display User active cards
self.print_hand() # Display User hand
self.display_values() # Display PC state
pass
def clear_delayed_messages(self, in_shop=False):
"""clears ready for turn"""
if in_shop:
self.delayed_shop_messages = []
else:
self.delayed_messages = []
pass
def add_delayed_message(self, msg, logger=None, in_shop=False):
"""add a delayed message"""
if logger is None: logger = self.player_logger
msg_dict = {"msg":msg, "logger":logger}
if in_shop:
self.delayed_shop_messages.append(msg_dict)
else:
self.delayed_messages.append(msg_dict)
pass
def print_delayed_messages(self, in_shop=False):
"""prints all the delayed messages"""
if in_shop:
iterator = self.delayed_shop_messages
else:
iterator = self.delayed_messages
while iterator:
item = iterator.pop(0)
msg = item["msg"]
logger = item["logger"]
logger(msg) # use logger from dict to output message
pass
def exit_card_shop(self):
"""UI for exiting the card shop"""
# user is ungracefully booted from shop
if self.money == 0:
self.parent.clear_term() # clear the screen
# welcome to the shop
self.logger.game(self.art.shop)
self.logger.game(self.art.underline)
self.logger.game("Cards bought here are added to your discard pile")
self.logger.game("")
self.print_delayed_messages(in_shop=True)
self.logger.game("Unfortunately, you have no remaining money...")
self.logger.game("You are being kicked out of the shop.")
self.parent.wait_for_user()
else: # else user has a nice quick exit
pass
self.add_delayed_message("You return from the Shop.", self.logger.game)
pass
# separates classes in my editor
@wrap_all(log_me)
class Computer(CommonActions, CommonUserActions, ___UserLoggers):
"""The Computer Player Class"""
def __init__(self, parent, hand_size, deck_settings, name, health):
"""initial settings for the computer player"""
self.parent = parent
self.art = Art() # create art for game
# store initial state
self.init = {attr:val for attr,val in locals().iteritems() if attr != 'self'}
# intialise params
self.hand_size = hand_size
self.name = name
self.health = health
self.deck_settings = deck_settings
self.aggressive = True
# logging
get_logger(self)
self.player_logger = self.logger.computer
# create newgame paramters
self.newgame()
def turn(self):
"""contains the computer turn routines"""
# Iterators to count money
# and attack in User's hands
self.parent.clear_term()
self.reset_vals() # reset money and attack to zero
# transfer all cards from hand to active
# add values in hand to current totals
self.play_all_cards()
self.logger.debug("Storing computer values ready for attack")
stored_attack = self.attack
stored_money = self.money
# PC starts by attacking User
self.attack_player(self.parent.user)
# Display health state
self.parent.display_health_status()
# Display PC state
self.logger.debug("Displaying stored computer values from before attack")
self.display_values(stored_attack, stored_money)
# Display PC state
name_pad = self.parent.max_player_name_len
self.player_logger(\
"{} Attacking!".format(self.name.ljust(name_pad)))
self.parent.user.player_logger(\
"{} Suffered a beating of -{} Health".format(
self.parent.user.name.ljust(name_pad),stored_attack))
self.logger.debug("Displaying stored computer values from AFTER attack")
self.display_values()
computer_buys_title = self.art.make_title("{} Buying".format(self.name))
self.player_logger(computer_buys_title)
self.purchase_cards()
self.player_logger(self.art.underline)
self.player_logger("")
self.end_turn()
self.player_logger("{} turn ending".format(self.name))
self.parent.wait_for_user()
pass
def purchase_cards(self):
"""This routine contains the actions required for the computer
to make card purchases"""
can_afford_cards = True
        while can_afford_cards and self.money > 0: # Keep buying while PC can afford cards and has money
self.logger.debug("Starting new purchase loop with money: {}".format(self.money))
self.player_logger("")
self.player_logger("{} is browsing... Money: {}".format(
self.name ,self.money))
# Loop while cb, conditions:
# len(self.wish_list) > 0 and money != 0
# The temporary list of purchased
# cards in the buying process
self.wish_list = self.get_wish_list()
if len(self.wish_list) > 0: # If more than one card was added to self.wish_list
self.logger.debug("Wish list is not empty ({} cards)".format(len(self.wish_list)))
self.desired_card_index = 0 # Index of most desirable card purchase
# Loop through the temp list by index
# Identifies the highest value item in the list
# Prioritises on attack (self.aggressive) or self.money (greedy)
# if equal values
self.logger.debug("Finding the most desirable purchase...")
self.desired = self.most_desirable_card_in_wishlist()
# Contains two parts of information:
# 1. If integer then it is a card from the active deck
# 2. If non-integer then it is a supplement
#
# If 1. then the integer may take a value
# between 0 and up to (not including) the size
# if the active deck
card_index = self.desired[0]
self.logger.debug("{0} attempts to purchase {1} (Index: {2})".format(self.name, *self.desired))
self.buy_card_by_index(card_index)
# ^Loop: Buy another card
else: # Exit loop if PC couldn't buy any cards
                can_afford_cards = False # this could be a break statement but this is more obvious
self.logger.debug("Wish list is empty ({} cards)".format(len(self.wish_list)))
if self.money == 0: # Exit loop if no money
                # This is a sub-comparison of the condition above
# This will just exit the loop 1 cycle earlier
self.logger.debug("{} has no money. Exiting wish list loop (money: {})".format(self.name, self.money))
else: # Don't buy if no money
self.logger.debug("{} has no money. Exiting purchase loop with money: {}".format(self.name, self.money))
self.player_logger("No Money to buy anything")
pass
def buy_card_by_index(self, source):
"""Attempts to buy a card given a source
Expected format of source = integer or 'S'"""
        # This is a card from the active deck (source is an integer index)
        if source != "S":
            purchase_card = self.parent.central.active[source]
self.logger.debug("Index: {} found in Central Hand ({}, cost:{})".format(
source, purchase_card.name, purchase_card.cost))
# If PC has money to purchase:
            # comparison has already been made
if self.money >= purchase_card.cost:
self.logger_affords_card(1, purchase_card.name, purchase_card.cost, wishlist=False)
# Add card to PC discard pile
card = self.parent.central.active.pop(source)
self.discard.append(card)
self.logger.game("Card bought... {}".format(card))
new_money = - card.cost
self.logger_buy_card(card, self.money, new_money)
self.money += new_money
# Refill active from self.parent.central.central deck
# if there are cards in self.parent.central.central
self.logger.debug("Attempting to refill card central active deck from central deck...")
if len(self.parent.central.deck) > 0:
self.logger.debug("{} cards in central deck".format(len(self.parent.central.deck)))
card = self.parent.central.deck.pop()
self.parent.central.active.append(card)
self.logger.debug("Moved 1x{} from {} to {}".format(card.name, "central deck", "central active deck"))
else:
# If no cards in self.parent.central.central deck,
# reduce activesize by 1
self.logger.debug("No cards in central deck to refill central active deck.")
self.logger.debug("central hand_size:{}-1".format(self.parent.central.hand_size))
self.parent.central.hand_size -= 1
else:
# This is kept here to avoid future errors that may be introduced
# This is already verified as never being initiated earlier in the code
# see can_afford_cards assignment
self.logger.debug("Developer Error: There are no supplements to buy! {}")
self.logger.debug("Money: {}".format(self.money))
return
else: # This is a supplement as it is not in the range [0,5]
# If PC has money to purchase:
            # comparison has already been made
if len(self.parent.central.supplements) > 0:
purchase_card = self.parent.central.supplements[0]
if self.money >= purchase_card.cost:
self.logger_affords_card(1, purchase_card.name, purchase_card.cost, wishlist=False)
card = self.parent.central.supplements.pop()
self.discard.append(card)
self.player_logger("Supplement Bought {}".format(card))
new_money = - card.cost
self.logger_buy_card(card, self.money, new_money)
self.money += new_money
else:
self.logger.debug("Not enough money to buy {}".format(purchase_card))
self.logger.debug("Money: {}".format(self.money))
return
else:
# This is kept here to avoid future errors that may be introduced
# This is already verified as never being initiated earlier in the code
# see can_afford_cards assignment
self.logger.debug("Developer Error: There are no supplements to buy! {}")
self.logger.debug("Money: {}".format(self.money))
return
pass
def get_wish_list(self):
"""Gets the list of cards that the computer wishes to try and buy"""
self.wish_list = [] # This will be a list of tuples
self.logger.debug("Temp purchase list (wish list) initiated")
self.__add_affordable_supplements_to_wishlist()
self.__add_affordable_cards_to_wishlist()
return self.wish_list
def most_desirable_card_in_wishlist(self):
"""This routine expects that self.wish_list exists in the format
of [( val, Card() )]
where val = integer or "S"
It returns a single list element corresponding to the most desired card
"""
desired = self.wish_list[self.desired_card_index]
for self.potential_card_index in xrange(0,len(self.wish_list)):
potential = self.wish_list[self.potential_card_index]
self.logger.debug("Current most desired card: {}".format(desired[1].name))
self.logger.debug("Comparing against potential card: {}".format(potential[1].name))
self.card_selector_AI(desired, potential)
desired = self.wish_list[self.desired_card_index]
return desired
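    # Rough illustration (hypothetical card variables, format taken from
    # get_wish_list below): a wish list such as
    #   [(0, card_a), (3, card_b), ("S", supplement_card)]
    # is scanned element by element; self.desired_card_index ends up pointing
    # at the entry judged best by card_selector_AI, and that tuple is returned.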
def card_selector_AI(self, desired, potential):
"""The computer AI that decides which card it likes the most to buy
between two cards provided
This function relies on two key global variables:
self.desired_card_index
self.potential_card_index
Expected format of desired, potential:
( val, Card() )
where val = integer or "S"
"""
# Primary comparison: Get most expensive card
self.logger.debug("Primary comparison (Cost) ...")
self.__primary_card_selector_AI(desired, potential)
if potential[1].cost == desired[1].cost:
# Secondary comparison: AI chosen strategy
self.__secondary_card_selector_AI(desired, potential)
else:
self.logger.debug("Secondary comparison (Strategy Dependent) not undertaken.")
pass
def __primary_card_selector_AI(self, desired, potential):
"""This is the first method that the computer uses to decide on a card purchase
This function relies on two key global variables:
self.desired_card_index
self.potential_card_index
Expected format of desired, potential:
( val, Card() )
where val = integer or "S"
"""
if potential[1].cost > desired[1].cost:
self.desired_card_index = self.potential_card_index
self.logger_compare_cards("cost", "is", potential[1].cost, desired[1].cost)
self.logger_new_desired() # Log the action
else:
self.logger_compare_cards("cost", "is not", potential[1].cost, desired[1].cost)
pass
def __secondary_card_selector_AI(self, desired, potential):
        This is the second method that the computer uses to decide on a card purchase
        This function uses the self.aggressive variable to decide how to proceed
This function relies on two key global variables:
self.desired_card_index
self.potential_card_index
Expected format of desired, potential:
( val, Card() )
where val = integer or "S"
"""
self.logger.debug("Secondary comparison (Strategy Dependent)...")
if self.aggressive: # Aggresive strategy
self.logger.debug("Using Aggressive strategy")
self.__secondary_ai_aggressive_comparison(potential, desired)
else: # Greedy strategy
self.logger.debug("Using Non-Aggressive strategy")
self.__secondary_ai_nonaggressive_comparison(potential, desired)
pass
def __secondary_ai_nonaggressive_comparison(self, potential, desired):
"""This routine is used if the computer is set to aggressive
This function relies on two key global variables:
self.desired_card_index
self.potential_card_index
Expected format of desired, potential:
( val, Card() )
where val = integer or "S"
"""
# Set self.desired_card_index to this card if highest money
        if potential[1].get_money() > desired[1].get_money():
self.desired_card_index = self.potential_card_index
self.logger_compare_cards("money", "is", potential[1].money, desired[1].money)
self.logger_new_desired() # Log the action
else:
self.logger_compare_cards("money", "is not", potential[1].money, desired[1].money)
pass
def __secondary_ai_aggressive_comparison(self, potential, desired):
"""This routine is used if the computer is set to aggressive
This function relies on two key global variables:
self.desired_card_index
self.potential_card_index
Expected format of desired, potential:
( val, Card() )
where val = integer or "S"
"""
# Set self.desired_card_index to this card if highest attack
if potential[1].get_attack() > desired[1].get_attack():
self.desired_card_index = self.potential_card_index
self.logger_compare_cards("attack", "is", potential[1].attack, desired[1].attack)
self.logger_new_desired() # Log the action
else:
self.logger_compare_cards("attack", "is not", potential[1].attack, desired[1].attack)
pass
def __add_affordable_cards_to_wishlist(self):
"""adds the affordable cards the the wish_list
expects that self.wish_list exists as a list
"""
# Select cards where cost of card_i < money
for self.potential_card_index, card in enumerate(self.parent.central.active): # Loop all cards
self.logger_new_desired() # Log the action
if card.cost <= self.money: # if PC has enough money
# Add to temporary purchases
self.wish_list.append((self.potential_card_index, card))
self.logger_affords_card(1, card.name, card.cost) # logger action
else:
self.logger_affords_card(1, card.name, card.cost, can_afford=False) # logger action
pass
def __add_affordable_supplements_to_wishlist(self):
"""adds the affordable cards the the wish_list
expects that self.wish_list exists as a list
"""
# Select Supplements if cost < self.money
if len(self.parent.central.supplements) > 0: # If there are any supplements
self.logger.debug("Supplements Detected by {}".format(self.name)) # logger action
card = self.parent.central.supplements[0]
if card.cost <= self.money: # If PC has enough money
# Add to temporary purchases
self.wish_list.append(("S", card))
self.logger_affords_card(1, card.name, card.cost) # logger action
else:
self.logger_affords_card(1, card.name, card.cost, can_afford=False) # logger action
else:
self.logger.debug("No Supplements available")
pass
|
flipdazed/SoftwareDevelopment
|
actors.py
|
Python
|
gpl-3.0
| 38,455
|
[
"VisIt"
] |
f5fbb160c7f38ba85244fbd65cc9bf28f0e487d550a44ae86fc5a5846e5c1f6e
|
#!/usr/bin/env python
import numpy as np
import scipy.interpolate as spi
import scipy.optimize as spo
import bulk_run_phonons
import fit_beta_V
import process_PVT_castep
import bm3_eos as eos
import earthref
import ionic_model
earth_model = earthref.EarthModel(earthref.ak135)
# Define constants
eps0 = 8.854187817E-12 # Vacuum permittivity (F/m)
e = 1.60217662E-19 # electron charge (C)
# Conversion factors
m2ang = 1.0E10
j2ev = 6.242E18
def depth_PT(depth):
"""Retrun liquidus P and T at a given depth in a magma ocean
Liquidus data Andrault et at. 2011 (EPSL doi:10.1016/j.epsl.2011.02.006)
who fit a modified Simmon and Glatzel equation:
T = T0 (P/a+1_^(1/c)
(see section 3.4) with parameters listed below. This replaces a
previous linear fit to data at 0 and 60 GPa.
"""
P = earth_model(6371-depth) # Interpolating AK135...
    # We now have P; T comes from the liquidus fit below
    T_0 = 1940.0 # virtual liquidus temperature at 0 GPa
a = 26.0 # GPa
c = 1.9
T = T_0 * ((P / a) + 1)**(1/c)
return T, P
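# Rough sanity check of the fit above (illustrative value only): at P = a = 26 GPa
# the expression gives T = 1940.0 * 2.0**(1.0/1.9), i.e. roughly 2790 K on the
# liquidus.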
def fit_beta(files, supercell=False):
paths_and_seeds = bulk_run_phonons.process_paths(files)
data = fit_beta_V.get_data(paths_and_seeds, supercell=supercell)
A1, A2, A3, B1, B2, B3, C1, C2, C3 = fit_beta_V.fit_beta_T_V(data, plot=False)
def get_beta_T_V(T, V):
return fit_beta_V.ln_beta_V_function_wrap(T, V, A1, A2, A3, B1,
B2, B3, C1, C2, C3)
return np.vectorize(get_beta_T_V)
def fit_PVT_EOS_params(files):
data = []
for f in files:
print(f)
data = process_PVT_castep.parse_castep_file(f, data)
Ts = [0, 500, 1000, 1500, 2000, 2500, 3000, 3500]
Vs = []
Fs = []
K0s = []
Kp0s = []
E0s = []
V0s = []
for T in Ts:
V, F = process_PVT_castep.get_VF(data, T)
V0, E0, K0, Kp0 = eos.fit_BM3_EOS(V, F, verbose=True)
Vs.append(V)
Fs.append(F)
K0s.append(K0)
Kp0s.append(Kp0)
E0s.append(E0)
V0s.append(V0)
fV0, fE0, fK0, fKp0 = eos.fit_parameters_quad(Ts, V0s, E0s, K0s, Kp0s,
plot=False)
def get_volume(P, T):
return eos.get_V(P, T, fV0, fK0, fKp0)
return np.vectorize(get_volume)
if __name__ == "__main__":
import glob
import matplotlib
import matplotlib.pyplot as plt
# Depth range of interest
depths = np.linspace(0.0, 2800.0, num=200)
# Get our list of Ps and Ts
Ts, Ps = depth_PT(depths)
# Volume of MgO
MgO_eos = fit_PVT_EOS_params(
glob.glob('../free_energy/MgO/MgO_*GPa/MgO.castep'))
MgO_Vs = MgO_eos(Ps, Ts)
MgO_Vs_athermal = MgO_eos(Ps, np.zeros_like(Ts))
# Volume of MgSiO3 Pv
MgSiO3_eos = fit_PVT_EOS_params(
glob.glob('../free_energy/MgSiO3/MgSiO3_*GPa/MgSiO3.castep'))
MgSiO3_Vs = MgSiO3_eos(Ps, Ts)
MgSiO3_Vs_athermal = MgSiO3_eos(Ps, np.zeros_like(Ts))
    # Volume of Mg2SiO4
Mg2SiO4_eos = fit_PVT_EOS_params(
glob.glob('../free_energy/Mg2SiO4/Mg2SiO4_*GPa/Mg2SiO4.castep'))
Mg2SiO4_Vs = Mg2SiO4_eos(Ps, Ts)
Mg2SiO4_Vs_athermal = Mg2SiO4_eos(Ps, np.zeros_like(Ts))
# 1000.ln(beta) for MgO
MgO_beta_fun = fit_beta(glob.glob('../free_energy/MgO/MgO_*GPa/MgO.castep'))
MgO_betas = MgO_beta_fun(Ts, MgO_Vs)
MgO_betas_athermal = MgO_beta_fun(Ts, MgO_Vs_athermal)
# 1000.ln(beta) for MgSiO3
MgSiO3_beta_fun = fit_beta(glob.glob('../free_energy/MgSiO3/MgSiO3_*GPa/MgSiO3.castep'))
MgSiO3_betas = MgSiO3_beta_fun(Ts, MgSiO3_Vs)
MgSiO3_betas_athermal = MgSiO3_beta_fun(Ts, MgSiO3_Vs_athermal)
    # 1000.ln(beta) for Mg2SiO4
Mg2SiO4_beta_fun = fit_beta(glob.glob('../free_energy/Mg2SiO4/Mg2SiO4_*GPa/Mg2SiO4.castep'))
Mg2SiO4_betas = Mg2SiO4_beta_fun(Ts, Mg2SiO4_Vs)
Mg2SiO4_betas_athermal = Mg2SiO4_beta_fun(Ts, Mg2SiO4_Vs_athermal)
print("Done fitting... now some key data" )
print("P(GPa) T(K) Depth(km), 1000.ln(alpha(Fo, MgO)), 1000.ln(alpha(Fo,MgPv)")
for P, T, D, B_Fo, B_MgO, B_MgPv in zip(Ps, Ts, depths, Mg2SiO4_betas, MgO_betas, MgSiO3_betas):
print(P, T, D, B_Fo-B_MgO, B_Fo-B_MgPv)
print("Sorting out the melt")
# First calculate fudge
# 1000.ln( beta(melt)) - 1000.ln (beta(ol)) is -0.080 at 1573K and 0 GPa.
melt_poly_coef = [1.9613, -0.00165, 0.0000019]
melt_coord_val = np.array(([4.93, 5.4, 6, 6.7, 7.25, 7.62, 7.85]))
melt_coord_pressure = np.array(([0.1, 2.5, 7.2, 16.3, 34.3, 72.1, 159.4]))
coord_spline = spi.InterpolatedUnivariateSpline(melt_coord_pressure, melt_coord_val)
all_popt = [ 2.32716768, -0.93910997, 0.06109785] # From fitting MgO
def kf(r0, zi, zj, n):
"""
Calculate force constant for Born-Mayer type interionic potential
r_0 - equilibrium distance between ions (m); can be array
zi, zj - charges on ions (electrons)
n - exponent for repulsive part (-); typically ~12
        returns force constant (N/m)
"""
k = (zi * zj * e**2 * (1-n)) / (4.0 * np.pi * eps0 * r0**3)
return k
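    # Illustrative use of kf (the 2.1 Angstrom bond length and n = 12 are assumed
    # example values, not results from this study):
    #   k_example = kf(2.1e-10, 2.0, -2.0, 12)
    # returns the harmonic force constant (N/m) fed to the ionic model below.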
def calc_beta_model(r, coord, t, qfac0, qfac1, qfacgrd):
qfac = qfac0 + r*qfac1 + coord*qfacgrd
n = 12
k = kf(r*1E-10, 2.0*qfac, -2.0*qfac, n)
beta = ionic_model.ionic_model_beta(k, t)
return beta
def find_beta_qfac0(r, coord, t, qfac0, qfac1, qfacgrd, target):
def func(qf):
return calc_beta_model(r, coord, t, qf, qfac1, qfacgrd) - target
print("-5", func(-5.0))
print("-2", func(-2.0))
print("-1", func(-1.0))
print("0", func(0.0))
print("2", func(2.0))
print("5", func(5.0))
new_qf, r = spo.brentq(func, 2.0, 5.0, full_output=True)
print(r)
return new_qf
def find_pressure_correction(melt_poly_coef, melt_coord_spline, t, qf, qfac1, qfacgrd, target):
def err_func(dp):
r = ionic_model.melt_bond_length(dp, melt_poly_coef)*1E10
coord = melt_coord_spline(dp)
return calc_beta_model(r, coord, t, qf, qfac1, qfacgrd) - target
dp, rootres = spo.brentq(err_func, -10.0, 10.0, full_output=True)
print(rootres)
return dp
measured_fractionation = 0.080
measured_temperature = 1573.0
measured_pressure = 0.0
r_melt = ionic_model.melt_bond_length(0.0, melt_poly_coef)
coord_melt = coord_spline(0.0)
beta_melt = calc_beta_model(r_melt*1E10, coord_melt, 1573.0, *all_popt)
beta_ol = Mg2SiO4_beta_fun(1573, Mg2SiO4_eos(measured_pressure, measured_temperature))
print("Calculated melt is:",
beta_melt,
"per mill")
print("Calculated Fo frac is:",
beta_ol,
"per mill")
print("Calculated melt - Fo frac is:",
beta_melt - beta_ol,
"per mill")
print("Observed melt - Fo frac is:", measured_fractionation, "per mill")
print("Applying model correction for pressure")
dp = find_pressure_correction(melt_poly_coef, coord_spline, 1573.0, all_popt[0],
all_popt[1], all_popt[2], measured_fractionation + beta_ol)
print("dp is", dp)
beta_melt = calc_beta_model(ionic_model.melt_bond_length(dp, melt_poly_coef)*1E10, coord_spline(dp), 1573.0, *all_popt)
print("Calculated melt - Fo frac is NOW:",
beta_melt - beta_ol,
"per mill")
print("Observed melt - Fo frac is:", measured_fractionation, "per mill")
melt_ln_betas = calc_beta_model(ionic_model.melt_bond_length(Ps+dp, melt_poly_coef)*1E10,
coord_spline(Ps+dp), Ts, *all_popt)
# And again for the athermal case
beta_ol_athermal = Mg2SiO4_beta_fun(1573, Mg2SiO4_eos(measured_pressure, 0.0))
print("Applying model correction for pressure")
dpa = find_pressure_correction(melt_poly_coef, coord_spline, 1573.0, all_popt[0],
all_popt[1], all_popt[2], measured_fractionation + beta_ol_athermal)
print("dp is", dp)
melt_ln_betas_athermal = calc_beta_model(ionic_model.melt_bond_length(Ps+dpa, melt_poly_coef)*1E10,
coord_spline(Ps+dpa), Ts, *all_popt)
print("Done fitting... now plotting")
f, ax1 = plt.subplots()
fs = 14
fs_l = fs
ax_depths = np.array([0, 200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000, 2200,
2400, 2600, 2800])
ax_Ts, ax_Ps = depth_PT(ax_depths)
ax1.invert_yaxis()
ax1.plot(ax_Ts, ax_Ps, ':g')
ax1.set_xlim(left=2000, right=5000)
ax1.set_xlabel("T (K)", fontsize=fs)
ax1.set_ylabel("P (GPa)", fontsize=fs)
ax1.tick_params(axis='both', which='both', labelsize=fs_l)
ax2 = ax1.twinx()
ax2.invert_yaxis()
    ax2.plot(ax_Ts, ax_depths, alpha=0) # invisible
ax2.set_ylabel("Depth (km)", fontsize=fs)
ax2.tick_params(axis='both', which='both', labelsize=fs_l)
ax3 = ax2.twiny()
    # For Tim's latest we want Mg25 - apparently half the fractionation
mg_25 = False
if mg_25:
ax3.set_xlabel(r"$\Delta^{}$Mg (per mill) relative to forsterite".format('{25}'), fontsize=fs)
ax3.set_xlim(left=0.0, right=0.12/2.0)
else:
ax3.set_xlabel(r"$\Delta^{}$Mg (per mill) relative to forsterite".format('{26}'), fontsize=fs)
#ax3.set_xlim(left=0.0, right=0.12)
ax3.tick_params(axis='both', which='both', labelsize=fs_l)
if mg_25:
ax3.plot((Mg2SiO4_betas - MgSiO3_betas)/2.0, depths, 'r-')
else:
ax3.plot((Mg2SiO4_betas_athermal - MgO_betas_athermal), depths, 'b--')
ax3.plot((Mg2SiO4_betas - MgO_betas), depths, 'b-')
ax3.plot((Mg2SiO4_betas - MgSiO3_betas), depths, 'r-')
ax3.plot((Mg2SiO4_betas_athermal - MgSiO3_betas_athermal), depths, 'r--')
ax3.plot((Mg2SiO4_betas_athermal - melt_ln_betas_athermal), depths, 'y--')
ax3.plot((Mg2SiO4_betas - melt_ln_betas), depths, 'y-')
#ax2 = ax1.twinx()
#ax2_tick_ds = np.array([200, 400, 600, 800, 1000])
#ax2_tick_Ps, ax2_tick_Ts = depth_PT(ax2_tick_ds)
#ax2_tick_labs = ["200", "400", "600", "800", "1000"]
#ax2.set_ylabel("P (GPa)")
#ax2.set_yticks(ax2_tick_ds)
#ax2.set_yticks(ax2_tick_labs)
f.tight_layout()
f.savefig("alpha_geotherm_6_liqidus.pdf")
#plt.show()
# New plot for melt...
# Plotting
f, ax1 = plt.subplots()
fs = 14
fs_l = fs
ax_depths = np.array([0, 200, 400, 600, 800, 1000, 1200, 1400, 1600, 1800, 2000, 2200,
2400, 2600, 2800])
ax_Ts, ax_Ps = depth_PT(ax_depths)
ax1.invert_yaxis()
ax1.plot(ax_Ts, ax_Ps, ':g')
ax1.set_xlim(left=2000, right=5000)
ax1.set_xlabel("T (K)", fontsize=fs)
ax1.set_ylabel("P (GPa)", fontsize=fs)
ax1.tick_params(axis='both', which='both', labelsize=fs_l)
ax2 = ax1.twinx()
ax2.invert_yaxis()
    ax2.plot(ax_Ts, ax_depths, alpha=0) # invisible
ax2.set_ylabel("Depth (km)", fontsize=fs)
ax2.tick_params(axis='both', which='both', labelsize=fs_l)
ax3 = ax2.twiny()
    # For Tim's latest we want Mg25 - apparently half the fractionation
mg_25 = False
if mg_25:
ax3.set_xlabel(r"$\Delta^{}$Mg (per mill) relative to liquid".format('{25}'), fontsize=fs)
ax3.set_xlim(left=0.0, right=0.12/2.0)
else:
ax3.set_xlabel(r"$\Delta^{}$Mg (per mill) relative to liquid".format('{26}'), fontsize=fs)
#ax3.set_xlim(left=0.0, right=0.12)
ax3.tick_params(axis='both', which='both', labelsize=fs_l)
if mg_25:
ax3.plot((melt_ln_betas - MgSiO3_betas)/2.0, depths, 'r-')
else:
ax3.plot((melt_ln_betas_athermal - Mg2SiO4_betas_athermal), depths, 'k--')
ax3.plot((melt_ln_betas - Mg2SiO4_betas), depths, 'k-')
ax3.plot((melt_ln_betas_athermal - MgO_betas_athermal), depths, 'b--')
ax3.plot((melt_ln_betas - MgO_betas), depths, 'b-')
ax3.plot((melt_ln_betas - MgSiO3_betas), depths, 'r-')
ax3.plot((melt_ln_betas_athermal - MgSiO3_betas_athermal), depths, 'r--')
#ax2 = ax1.twinx()
#ax2_tick_ds = np.array([200, 400, 600, 800, 1000])
#ax2_tick_Ps, ax2_tick_Ts = depth_PT(ax2_tick_ds)
#ax2_tick_labs = ["200", "400", "600", "800", "1000"]
#ax2.set_ylabel("P (GPa)")
#ax2.set_yticks(ax2_tick_ds)
#ax2.set_yticks(ax2_tick_labs)
f.tight_layout()
f.savefig("alpha_geotherm_6_liqidus_melt.pdf")
plt.show()
|
andreww/isofrac
|
alpha_geotherm_6.py
|
Python
|
bsd-3-clause
| 12,556
|
[
"CASTEP"
] |
19220fe397c9883d322dac70b298420574923f11e9a04b502e15dab331d4446f
|
# textrender.py
# module to render text, tries to understand a basic LaTeX-like syntax
# Copyright (C) 2003 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
from __future__ import division
import math
import re
import numpy as N
from ..compat import cbasestr, cstr
from .. import qtall as qt4
from . import points
mmlsupport = True
try:
from ..helpers import qtmml
from ..helpers import recordpaint
except ImportError:
mmlsupport = False
try:
from ..helpers.qtloops import RotatedRectangle
except ImportError:
from .slowfuncs import RotatedRectangle
# this definition is monkey-patched when OpenReliability is running in self-test
# mode as we need to hack the metrics - urgh
FontMetrics = qt4.QFontMetricsF
# lookup table for special symbols
symbols = {
# escaped characters
r'\_': '_',
r'\^': '^',
r'\{': '{',
r'\}': '}',
r'\[': '[',
r'\]': ']',
r'\backslash' : u'\u005c',
# operators
r'\pm': u'\u00b1',
r'\mp': u'\u2213',
r'\times': u'\u00d7',
r'\cdot': u'\u22c5',
r'\ast': u'\u2217',
r'\star': u'\u22c6',
r'\deg': u'\u00b0',
r'\divide': u'\u00f7',
r'\dagger': u'\u2020',
r'\ddagger': u'\u2021',
r'\cup': u'\u22c3',
r'\cap': u'\u22c2',
r'\uplus': u'\u228e',
r'\vee': u'\u22c1',
r'\wedge': u'\u22c0',
r'\nabla': u'\u2207',
r'\lhd': u'\u22b2',
r'\rhd': u'\u22b3',
r'\unlhd': u'\u22b4',
r'\unrhd': u'\u22b5',
r'\oslash': u'\u2298',
r'\odot': u'\u2299',
r'\oplus': u'\u2295',
r'\ominus': u'\u2296',
r'\otimes': u'\u2297',
r'\diamond': u'\u22c4',
r'\bullet': u'\u2022',
r'\AA': u'\u212b',
r'\sqrt': u'\u221a',
r'\propto': u'\u221d',
r'\infty': u'\u221e',
r'\int': u'\u222b',
r'\leftarrow': u'\u2190',
r'\Leftarrow': u'\u21d0',
r'\uparrow': u'\u2191',
r'\rightarrow': u'\u2192',
r'\to': u'\u2192',
r'\Rightarrow': u'\u21d2',
r'\downarrow': u'\u2193',
r'\leftrightarrow': u'\u2194',
r'\Leftrightarrow': u'\u21d4',
r'\circ': u'\u25cb',
r'\ell': u'\u2113',
# relations
r'\le': u'\u2264',
r'\ge': u'\u2265',
r'\neq': u'\u2260',
r'\sim': u'\u223c',
r'\ll': u'\u226a',
r'\gg': u'\u226b',
r'\doteq': u'\u2250',
r'\simeq': u'\u2243',
r'\subset': u'\u2282',
r'\supset': u'\u2283',
r'\approx': u'\u2248',
r'\asymp': u'\u224d',
r'\subseteq': u'\u2286',
r'\supseteq': u'\u2287',
r'\sqsubset': u'\u228f',
r'\sqsupset': u'\u2290',
r'\sqsubseteq': u'\u2291',
r'\sqsupseteq': u'\u2292',
r'\in': u'\u2208',
r'\ni': u'\u220b',
r'\equiv': u'\u2261',
r'\prec': u'\u227a',
r'\succ': u'\u227b',
r'\preceq': u'\u227c',
r'\succeq': u'\u227d',
r'\bowtie': u'\u22c8',
r'\vdash': u'\u22a2',
r'\dashv': u'\u22a3',
r'\models': u'\u22a7',
r'\perp': u'\u22a5',
r'\parallel': u'\u2225',
r'\umid': u'\u2223',
# lower case greek letters
r'\alpha': u'\u03b1',
r'\beta': u'\u03b2',
r'\gamma': u'\u03b3',
r'\delta': u'\u03b4',
r'\epsilon': u'\u03b5',
r'\zeta': u'\u03b6',
r'\eta': u'\u03b7',
r'\theta': u'\u03b8',
r'\iota': u'\u03b9',
r'\kappa': u'\u03ba',
r'\lambda': u'\u03bb',
r'\mu': u'\u03bc',
r'\nu': u'\u03bd',
r'\xi': u'\u03be',
r'\omicron': u'\u03bf',
r'\pi': u'\u03c0',
r'\rho': u'\u03c1',
r'\stigma': u'\u03c2',
r'\sigma': u'\u03c3',
r'\tau': u'\u03c4',
r'\upsilon': u'\u03c5',
r'\phi': u'\u03c6',
r'\chi': u'\u03c7',
r'\psi': u'\u03c8',
r'\omega': u'\u03c9',
# upper case greek letters
r'\Alpha': u'\u0391',
r'\Beta': u'\u0392',
r'\Gamma': u'\u0393',
r'\Delta': u'\u0394',
r'\Epsilon': u'\u0395',
r'\Zeta': u'\u0396',
r'\Eta': u'\u0397',
r'\Theta': u'\u0398',
r'\Iota': u'\u0399',
r'\Kappa': u'\u039a',
r'\Lambda': u'\u039b',
r'\Mu': u'\u039c',
r'\Nu': u'\u039d',
r'\Xi': u'\u039e',
r'\Omicron': u'\u039f',
r'\Pi': u'\u03a0',
r'\Rho': u'\u03a1',
r'\Sigma': u'\u03a3',
r'\Tau': u'\u03a4',
r'\Upsilon': u'\u03a5',
r'\Phi': u'\u03a6',
r'\Chi': u'\u03a7',
r'\Psi': u'\u03a8',
r'\Omega': u'\u03a9',
# hebrew
r'\aleph': u'\u05d0',
r'\beth': u'\u05d1',
r'\daleth': u'\u05d3',
r'\gimel': u'\u2137',
# more symbols
'\\AE' : u'\xc6',
'\\Angle' : u'\u299c',
'\\Bumpeq' : u'\u224e',
'\\Cap' : u'\u22d2',
'\\Colon' : u'\u2237',
'\\Cup' : u'\u22d3',
'\\DH' : u'\xd0',
'\\DJ' : u'\u0110',
'\\Digamma' : u'\u03dc',
'\\Koppa' : u'\u03de',
'\\L' : u'\u0141',
'\\LeftDownTeeVector': u'\u2961',
'\\LeftDownVectorBar': u'\u2959',
'\\LeftRightVector' : u'\u294e',
'\\LeftTeeVector' : u'\u295a',
'\\LeftTriangleBar' : u'\u29cf',
'\\LeftUpDownVector': u'\u2951',
'\\LeftUpTeeVector' : u'\u2960',
'\\LeftUpVectorBar' : u'\u2958',
'\\LeftVectorBar' : u'\u2952',
'\\Lleftarrow' : u'\u21da',
'\\Longleftarrow' : u'\u27f8',
'\\Longleftrightarrow': u'\u27fa',
'\\Longrightarrow' : u'\u27f9',
'\\Lsh' : u'\u21b0',
'\\NG' : u'\u014a',
'\\NestedGreaterGreater': u'\u2aa2',
'\\NestedLessLess' : u'\u2aa1',
'\\O' : u'\xd8',
'\\OE' : u'\u0152',
'\\ReverseUpEquilibrium': u'\u296f',
'\\RightDownTeeVector': u'\u295d',
'\\RightDownVectorBar': u'\u2955',
'\\RightTeeVector' : u'\u295b',
'\\RightTriangleBar': u'\u29d0',
'\\RightUpDownVector': u'\u294f',
'\\RightUpTeeVector': u'\u295c',
'\\RightUpVectorBar': u'\u2954',
'\\RightVectorBar' : u'\u2953',
'\\RoundImplies' : u'\u2970',
'\\Rrightarrow' : u'\u21db',
'\\Rsh' : u'\u21b1',
'\\RuleDelayed' : u'\u29f4',
'\\Sampi' : u'\u03e0',
'\\Stigma' : u'\u03da',
'\\Subset' : u'\u22d0',
'\\Supset' : u'\u22d1',
'\\TH' : u'\xde',
'\\UpArrowBar' : u'\u2912',
'\\UpEquilibrium' : u'\u296e',
'\\Uparrow' : u'\u21d1',
'\\Updownarrow' : u'\u21d5',
'\\VDash' : u'\u22ab',
'\\Vdash' : u'\u22a9',
'\\Vert' : u'\u2016',
'\\Vvdash' : u'\u22aa',
'\\aa' : u'\xe5',
'\\ae' : u'\xe6',
'\\allequal' : u'\u224c',
'\\amalg' : u'\u2a3f',
'\\angle' : u'\u2220',
'\\approxeq' : u'\u224a',
'\\approxnotequal' : u'\u2246',
'\\aquarius' : u'\u2652',
'\\aries' : u'\u2648',
'\\arrowwaveright' : u'\u219d',
'\\backepsilon' : u'\u03f6',
'\\backprime' : u'\u2035',
'\\backsim' : u'\u223d',
'\\backsimeq' : u'\u22cd',
'\\barwedge' : u'\u2305',
'\\because' : u'\u2235',
'\\between' : u'\u226c',
'\\bigcap' : u'\u22c2',
'\\bigcirc' : u'\u25ef',
'\\bigcup' : u'\u22c3',
'\\bigtriangledown' : u'\u25bd',
'\\bigtriangleup' : u'\u25b3',
'\\blacklozenge' : u'\u29eb',
'\\blacksquare' : u'\u25aa',
'\\blacktriangle' : u'\u25b4',
'\\blacktriangledown': u'\u25be',
'\\blacktriangleleft': u'\u25c2',
'\\blacktriangleright': u'\u25b8',
'\\boxdot' : u'\u22a1',
'\\boxminus' : u'\u229f',
'\\boxplus' : u'\u229e',
'\\boxtimes' : u'\u22a0',
'\\bumpeq' : u'\u224f',
'\\cancer' : u'\u264b',
'\\capricornus' : u'\u2651',
'\\cdots' : u'\u22ef',
'\\circeq' : u'\u2257',
'\\circlearrowleft' : u'\u21ba',
'\\circlearrowright': u'\u21bb',
'\\circledS' : u'\u24c8',
'\\circledast' : u'\u229b',
'\\circledcirc' : u'\u229a',
'\\circleddash' : u'\u229d',
'\\clockoint' : u'\u2a0f',
'\\clwintegral' : u'\u2231',
'\\complement' : u'\u2201',
'\\cong' : u'\u2245',
'\\coprod' : u'\u2210',
'\\curlyeqprec' : u'\u22de',
'\\curlyeqsucc' : u'\u22df',
'\\curlyvee' : u'\u22ce',
'\\curlywedge' : u'\u22cf',
'\\curvearrowleft' : u'\u21b6',
'\\curvearrowright' : u'\u21b7',
'\\dblarrowupdown' : u'\u21c5',
'\\ddddot' : u'\u20dc',
'\\dddot' : u'\u20db',
'\\dh' : u'\xf0',
'\\diagup' : u'\u2571',
'\\digamma' : u'\u03dd',
'\\div' : u'\xf7',
'\\divideontimes' : u'\u22c7',
'\\dj' : u'\u0111',
'\\doteqdot' : u'\u2251',
'\\dotplus' : u'\u2214',
'\\downdownarrows' : u'\u21ca',
'\\downharpoonleft' : u'\u21c3',
'\\downharpoonright': u'\u21c2',
'\\downslopeellipsis': u'\u22f1',
'\\eighthnote' : u'\u266a',
'\\eqcirc' : u'\u2256',
'\\eqslantgtr' : u'\u2a96',
'\\eqslantless' : u'\u2a95',
'\\estimates' : u'\u2259',
'\\eth' : u'\u01aa',
'\\exists' : u'\u2203',
'\\fallingdotseq' : u'\u2252',
'\\flat' : u'\u266d',
'\\forall' : u'\u2200',
'\\forcesextra' : u'\u22a8',
'\\frown' : u'\u2322',
'\\gemini' : u'\u264a',
'\\geq' : u'\u2265',
'\\geqq' : u'\u2267',
'\\geqslant' : u'\u2a7e',
'\\gnapprox' : u'\u2a8a',
'\\gneq' : u'\u2a88',
'\\gneqq' : u'\u2269',
'\\gnsim' : u'\u22e7',
'\\greaterequivlnt': u'\u2273',
'\\gtrapprox' : u'\u2a86',
'\\gtrdot' : u'\u22d7',
'\\gtreqless' : u'\u22db',
'\\gtreqqless' : u'\u2a8c',
'\\gtrless' : u'\u2277',
'\\guillemotleft' : u'\xab',
'\\guillemotright' : u'\xbb',
'\\guilsinglleft' : u'\u2039',
'\\guilsinglright' : u'\u203a',
'\\hermitconjmatrix': u'\u22b9',
'\\homothetic' : u'\u223b',
'\\hookleftarrow' : u'\u21a9',
'\\hookrightarrow' : u'\u21aa',
'\\hslash' : u'\u210f',
'\\i' : u'\u0131',
'\\intercal' : u'\u22ba',
'\\jupiter' : u'\u2643',
'\\k' : u'\u0328',
'\\l' : u'\u0142',
'\\langle' : u'\u2329',
'\\lazysinv' : u'\u223e',
'\\lceil' : u'\u2308',
'\\ldots' : u'\u2026',
'\\leftarrowtail' : u'\u21a2',
'\\leftharpoondown' : u'\u21bd',
'\\leftharpoonup' : u'\u21bc',
'\\leftleftarrows' : u'\u21c7',
'\\leftrightarrows' : u'\u21c6',
'\\leftrightharpoons': u'\u21cb',
'\\leftrightsquigarrow': u'\u21ad',
'\\leftthreetimes' : u'\u22cb',
'\\leo' : u'\u264c',
'\\leq' : u'\u2264',
'\\leqq' : u'\u2266',
'\\leqslant' : u'\u2a7d',
'\\lessapprox' : u'\u2a85',
'\\lessdot' : u'\u22d6',
'\\lesseqgtr' : u'\u22da',
'\\lesseqqgtr' : u'\u2a8b',
'\\lessequivlnt' : u'\u2272',
'\\lessgtr' : u'\u2276',
'\\lfloor' : u'\u230a',
'\\libra' : u'\u264e',
'\\llcorner' : u'\u231e',
'\\lmoustache' : u'\u23b0',
'\\lnapprox' : u'\u2a89',
'\\lneq' : u'\u2a87',
'\\lneqq' : u'\u2268',
'\\lnot' : u'\xac',
'\\lnsim' : u'\u22e6',
'\\longleftarrow' : u'\u27f5',
'\\longleftrightarrow': u'\u27f7',
'\\longmapsto' : u'\u27fc',
'\\longrightarrow' : u'\u27f6',
'\\looparrowleft' : u'\u21ab',
'\\looparrowright' : u'\u21ac',
'\\lozenge' : u'\u25ca',
'\\lrcorner' : u'\u231f',
'\\ltimes' : u'\u22c9',
'\\male' : u'\u2642',
'\\mapsto' : u'\u21a6',
'\\measuredangle' : u'\u2221',
'\\mercury' : u'\u263f',
'\\mho' : u'\u2127',
'\\mid' : u'\u2223',
'\\mkern1mu' : u'\u200a',
'\\mkern4mu' : u'\u205f',
'\\multimap' : u'\u22b8',
'\\nLeftarrow' : u'\u21cd',
'\\nLeftrightarrow' : u'\u21ce',
'\\nRightarrow' : u'\u21cf',
'\\nVDash' : u'\u22af',
'\\nVdash' : u'\u22ae',
'\\natural' : u'\u266e',
'\\nearrow' : u'\u2197',
'\\neptune' : u'\u2646',
'\\nexists' : u'\u2204',
'\\ng' : u'\u014b',
'\\nleftarrow' : u'\u219a',
'\\nleftrightarrow' : u'\u21ae',
'\\nmid' : u'\u2224',
'\\nolinebreak' : u'\u2060',
'\\notgreaterless' : u'\u2279',
'\\notlessgreater' : u'\u2278',
'\\nparallel' : u'\u2226',
'\\nrightarrow' : u'\u219b',
'\\ntriangleleft' : u'\u22ea',
'\\ntrianglelefteq' : u'\u22ec',
'\\ntriangleright' : u'\u22eb',
'\\ntrianglerighteq': u'\u22ed',
'\\nvDash' : u'\u22ad',
'\\nvdash' : u'\u22ac',
'\\nwarrow' : u'\u2196',
'\\o' : u'\xf8',
'\\oe' : u'\u0153',
'\\oint' : u'\u222e',
'\\openbracketleft' : u'\u301a',
'\\openbracketright': u'\u301b',
'\\original' : u'\u22b6',
'\\partial' : u'\u2202',
'\\perspcorrespond' : u'\u2a5e',
'\\pisces' : u'\u2653',
'\\pitchfork' : u'\u22d4',
'\\pluto' : u'\u2647',
'\\precapprox' : u'\u2ab7',
'\\preccurlyeq' : u'\u227c',
'\\precedesnotsimilar': u'\u22e8',
'\\precnapprox' : u'\u2ab9',
'\\precneqq' : u'\u2ab5',
'\\prod' : u'\u220f',
'\\quarternote' : u'\u2669',
'\\rangle' : u'\u232a',
'\\rbrace' : u'}',
'\\rceil' : u'\u2309',
'\\recorder' : u'\u2315',
'\\rfloor' : u'\u230b',
'\\rightangle' : u'\u221f',
'\\rightanglearc' : u'\u22be',
'\\rightarrowtail' : u'\u21a3',
'\\rightharpoondown': u'\u21c1',
'\\rightharpoonup' : u'\u21c0',
'\\rightleftarrows' : u'\u21c4',
'\\rightleftharpoons': u'\u21cc',
'\\rightmoon' : u'\u263e',
'\\rightrightarrows': u'\u21c9',
'\\rightsquigarrow' : u'\u21dd',
'\\rightthreetimes' : u'\u22cc',
'\\risingdotseq' : u'\u2253',
'\\rmoustache' : u'\u23b1',
'\\rtimes' : u'\u22ca',
'\\sagittarius' : u'\u2650',
'\\saturn' : u'\u2644',
'\\scorpio' : u'\u264f',
'\\searrow' : u'\u2198',
'\\setminus' : u'\u2216',
'\\sharp' : u'\u266f',
'\\smile' : u'\u2323',
'\\sphericalangle' : u'\u2222',
'\\sqcap' : u'\u2293',
'\\sqcup' : u'\u2294',
'\\sqrint' : u'\u2a16',
'\\square' : u'\u25a1',
'\\ss' : u'\xdf',
'\\starequal' : u'\u225b',
'\\subseteqq' : u'\u2ac5',
'\\subsetneq' : u'\u228a',
'\\subsetneqq' : u'\u2acb',
'\\succapprox' : u'\u2ab8',
'\\succcurlyeq' : u'\u227d',
'\\succnapprox' : u'\u2aba',
'\\succneqq' : u'\u2ab6',
'\\succnsim' : u'\u22e9',
'\\sum' : u'\u2211',
'\\supseteqq' : u'\u2ac6',
'\\supsetneq' : u'\u228b',
'\\supsetneqq' : u'\u2acc',
'\\surd' : u'\u221a',
'\\surfintegral' : u'\u222f',
'\\swarrow' : u'\u2199',
'\\taurus' : u'\u2649',
'\\textTheta' : u'\u03f4',
'\\textasciiacute' : u'\xb4',
'\\textasciibreve' : u'\u02d8',
'\\textasciicaron' : u'\u02c7',
'\\textasciidieresis': u'\xa8',
'\\textasciigrave' : u'`',
'\\textasciimacron' : u'\xaf',
'\\textasciitilde' : u'~',
'\\textbrokenbar' : u'\xa6',
'\\textbullet' : u'\u2022',
'\\textcent' : u'\xa2',
'\\textcopyright' : u'\xa9',
'\\textcurrency' : u'\xa4',
'\\textdagger' : u'\u2020',
'\\textdaggerdbl' : u'\u2021',
'\\textdegree' : u'\xb0',
'\\textdollar' : u'$',
'\\textdoublepipe' : u'\u01c2',
'\\textemdash' : u'\u2014',
'\\textendash' : u'\u2013',
'\\textexclamdown' : u'\xa1',
'\\texthvlig' : u'\u0195',
'\\textnrleg' : u'\u019e',
'\\textonehalf' : u'\xbd',
'\\textonequarter' : u'\xbc',
'\\textordfeminine' : u'\xaa',
'\\textordmasculine': u'\xba',
'\\textparagraph' : u'\xb6',
'\\textperiodcentered': u'\u02d9',
'\\textpertenthousand': u'\u2031',
'\\textperthousand' : u'\u2030',
'\\textphi' : u'\u0278',
'\\textquestiondown': u'\xbf',
'\\textquotedblleft': u'\u201c',
'\\textquotedblright': u'\u201d',
'\\textquotesingle' : u"'",
'\\textregistered' : u'\xae',
'\\textsection' : u'\xa7',
'\\textsterling' : u'\xa3',
'\\texttheta' : u'\u03b8',
'\\textthreequarters': u'\xbe',
'\\texttildelow' : u'\u02dc',
'\\texttimes' : u'\xd7',
'\\texttrademark' : u'\u2122',
'\\textturnk' : u'\u029e',
'\\textvartheta' : u'\u03d1',
'\\textvisiblespace': u'\u2423',
'\\textyen' : u'\xa5',
'\\th' : u'\xfe',
'\\therefore' : u'\u2234',
'\\tildetrpl' : u'\u224b',
'\\top' : u'\u22a4',
'\\triangledown' : u'\u25bf',
'\\triangleleft' : u'\u25c3',
'\\trianglelefteq' : u'\u22b4',
'\\triangleq' : u'\u225c',
'\\triangleright' : u'\u25b9',
'\\trianglerighteq' : u'\u22b5',
'\\truestate' : u'\u22a7',
'\\twoheadleftarrow': u'\u219e',
'\\twoheadrightarrow': u'\u21a0',
'\\ulcorner' : u'\u231c',
'\\updownarrow' : u'\u2195',
'\\upharpoonleft' : u'\u21bf',
'\\upharpoonright' : u'\u21be',
'\\upslopeellipsis' : u'\u22f0',
'\\upuparrows' : u'\u21c8',
'\\uranus' : u'\u2645',
'\\urcorner' : u'\u231d',
'\\varepsilon' : u'\u025b',
'\\varkappa' : u'\u03f0',
'\\varnothing' : u'\u2205',
'\\varphi' : u'\u03c6',
'\\varpi' : u'\u03d6',
'\\varrho' : u'\u03f1',
'\\varsigma' : u'\u03c2',
'\\vartriangle' : u'\u25b5',
'\\vartriangleleft' : u'\u22b2',
'\\vartriangleright': u'\u22b3',
'\\vdots' : u'\u22ee',
'\\veebar' : u'\u22bb',
'\\venus' : u'\u2640',
'\\vert' : u'|',
'\\verymuchgreater' : u'\u22d9',
'\\verymuchless' : u'\u22d8',
'\\virgo' : u'\u264d',
'\\volintegral' : u'\u2230',
'\\wp' : u'\u2118',
'\\wr' : u'\u2240',
}
class RenderState(object):
"""Holds the state of the rendering."""
def __init__(self, font, painter, x, y, alignhorz,
actually_render=True):
self.font = font
self.painter = painter
self.device = painter.device()
self.x = x # current x position
self.y = y # current y position
self.alignhorz = alignhorz
self.actually_render = actually_render
        self.maxlines = 1 # maximum number of lines drawn
def fontMetrics(self):
"""Returns font metrics object."""
return FontMetrics(self.font, self.device)
def getPixelsPerPt(self):
"""Return number of pixels per point in the rendering."""
painter = self.painter
pixperpt = painter.device().logicalDpiY() / 72.
try:
pixperpt *= painter.scaling
except AttributeError:
pass
return pixperpt
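    # For example, on a 96 dpi paint device this returns 96/72, i.e. about 1.33
    # pixels per point, scaled further by painter.scaling when that attribute
    # is present.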
class Part(object):
"""Represents a part of the text to be rendered, made up of smaller parts."""
def __init__(self, children):
self.children = children
def render(self, state):
for p in self.children:
p.render(state)
class PartText(Part):
"""Fundamental bit of text to be rendered: some text."""
def __init__(self, text):
self.text = text
def addText(self, text):
self.text += text
def render(self, state):
"""Render some text."""
width = state.fontMetrics().width(self.text)
# actually write the text if requested
if state.actually_render:
state.painter.drawText( qt4.QPointF(state.x, state.y), self.text )
# move along, nothing to see
state.x += width
class PartLines(Part):
"""Render multiple lines."""
def __init__(self, children):
Part.__init__(self, children)
self.widths = []
def render(self, state):
"""Render multiple lines."""
# record widths of individual lines
if not state.actually_render:
self.widths = []
height = state.fontMetrics().height()
inity = state.y
initx = state.x
state.y -= height*(len(self.children)-1)
# iterate over lines (reverse as we draw from bottom up)
for i, part in enumerate(self.children):
if state.actually_render and self.widths:
xwidth = max(self.widths)
# if we're rendering, use max width to justify line
if state.alignhorz < 0:
# left alignment
state.x = initx
elif state.alignhorz == 0:
# centre alignment
state.x = initx + (xwidth - self.widths[i])*0.5
elif state.alignhorz > 0:
# right alignment
state.x = initx + (xwidth - self.widths[i])
else:
# if not, just left justify to get widths
state.x = initx
# render the line itself
part.render(state)
# record width if we're not rendering
if not state.actually_render:
self.widths.append( state.x - initx )
# move up a line
state.y += height
# move on x posn
if self.widths:
state.x = initx + max(self.widths)
else:
state.x = initx
state.y = inity
# keep track of number of lines rendered
state.maxlines = max(state.maxlines, len(self.children))
class PartSuperScript(Part):
"""Represents superscripted part."""
def render(self, state):
font = state.font
painter = state.painter
# change text height
oldheight = state.fontMetrics().height()
size = font.pointSizeF()
font.setPointSizeF(size*0.6)
painter.setFont(font)
# set position
oldy = state.y
state.y -= oldheight*0.4
# draw children
Part.render(self, state)
# restore font and position
state.y = oldy
font.setPointSizeF(size)
painter.setFont(font)
class PartFrac(Part):
""""A fraction, do latex \frac{a}{b}."""
def render(self, state):
if len(self.children) != 2:
return
font = state.font
painter = state.painter
# make font half size
size = font.pointSizeF()
font.setPointSizeF(size*0.5)
painter.setFont(font)
# keep track of width above and below line
if not state.actually_render:
self.widths = []
initx = state.x
inity = state.y
# render bottom of fraction
if state.actually_render and len(self.widths) == 2:
# centre line
state.x = initx + (max(self.widths) - self.widths[0])*0.5
self.children[1].render(state)
if not state.actually_render:
# get width if not rendering
self.widths.append(state.x - initx)
# render top of fraction
m = state.fontMetrics()
state.y -= (m.ascent() + m.descent())
if state.actually_render and len(self.widths) == 2:
# centre line
state.x = initx + (max(self.widths) - self.widths[1])*0.5
else:
state.x = initx
self.children[0].render(state)
if not state.actually_render:
self.widths.append(state.x - initx)
state.x = initx + max(self.widths)
state.y = inity
# restore font
font.setPointSizeF(size)
painter.setFont(font)
height = state.fontMetrics().ascent()
# draw line between lines with 0.5pt thickness
painter.save()
        painter.setPen( qt4.QPen(painter.pen().brush(),
                                 state.getPixelsPerPt()*0.5) )
painter.drawLine(qt4.QPointF(initx,
inity-height/2.),
qt4.QPointF(initx+max(self.widths),
inity-height/2.))
painter.restore()
class PartSubScript(Part):
"""Represents subscripted part."""
def render(self, state):
font = state.font
# change text height
size = font.pointSizeF()
font.setPointSizeF(size*0.6)
state.painter.setFont(font)
# set position
oldy = state.y
state.y += state.fontMetrics().descent()
# draw children
Part.render(self, state)
# restore font and position
state.y = oldy
font.setPointSizeF(size)
state.painter.setFont(font)
class PartMultiScript(Part):
"""Represents multiple parts with the same starting x, e.g. a combination of
super- and subscript parts."""
def render(self, state):
oldx = state.x
newx = oldx
for p in self.children:
state.x = oldx
p.render(state)
newx = max([state.x, newx])
state.x = newx
def append(self, p):
self.children.append(p)
class PartItalic(Part):
"""Represents italic part."""
def render(self, state):
font = state.font
font.setItalic( not font.italic() )
state.painter.setFont(font)
Part.render(self, state)
font.setItalic( not font.italic() )
state.painter.setFont(font)
class PartBold(Part):
"""Represents bold part."""
def render(self, state):
font = state.font
font.setBold( not font.bold() )
state.painter.setFont(font)
Part.render(self, state)
font.setBold( not font.bold() )
state.painter.setFont(font)
class PartUnderline(Part):
"""Represents underlined part."""
def render(self, state):
font = state.font
font.setUnderline( not font.underline() )
state.painter.setFont(font)
Part.render(self, state)
font.setUnderline( not font.underline() )
state.painter.setFont(font)
class PartFont(Part):
"""Change font name in part."""
def __init__(self, children):
try:
self.fontname = children[0].text
except (AttributeError, IndexError):
self.fontname = ''
self.children = children[1:]
def render(self, state):
font = state.font
oldfamily = font.family()
font.setFamily(self.fontname)
state.painter.setFont(font)
Part.render(self, state)
font.setFamily(oldfamily)
state.painter.setFont(font)
class PartSize(Part):
"""Change font size in part."""
def __init__(self, children):
self.size = None
self.deltasize = None
# convert size
try:
size = children[0].text.replace('pt', '') # crap code
if size[:1] in '+-':
# is a modification of font size
self.deltasize = float(size)
else:
# is an absolute font size
self.size = float(size)
except (AttributeError, ValueError, IndexError):
self.deltasize = 0.
self.children = children[1:]
def render(self, state):
font = state.font
size = oldsize = font.pointSizeF()
if self.size:
# absolute size
size = self.size
elif self.deltasize:
# change of size
size = max(size+self.deltasize, 0.1)
font.setPointSizeF(size)
state.painter.setFont(font)
Part.render(self, state)
font.setPointSizeF(oldsize)
state.painter.setFont(font)
class PartBar(Part):
"""Draw a bar over text."""
def render(self, state):
initx = state.x
# draw material under bar
Part.render(self, state)
# draw line over text with 0.5pt thickness
painter = state.painter
height = state.fontMetrics().ascent()
painter.save()
penw = state.getPixelsPerPt()*0.5
painter.setPen( qt4.QPen(painter.pen().brush(), penw) )
painter.drawLine(qt4.QPointF(initx,
state.y-height+penw),
qt4.QPointF(state.x,
state.y-height+penw))
painter.restore()
class PartDot(Part):
"""Draw a dot over text."""
def render(self, state):
initx = state.x
# draw material under bar
Part.render(self, state)
# draw circle over text with 1pt radius
painter = state.painter
height = state.fontMetrics().ascent()
painter.save()
circsize = state.getPixelsPerPt()
painter.setBrush( qt4.QBrush(painter.pen().color()) )
painter.setPen( qt4.QPen(qt4.Qt.NoPen) )
x = 0.5*(initx + state.x)
y = state.y-height + circsize
painter.drawEllipse( qt4.QRectF(
qt4.QPointF(x-circsize,y-circsize),
qt4.QPointF(x+circsize,y+circsize)) )
painter.restore()
class PartMarker(Part):
"""Draw a marker symbol."""
def render(self, state):
painter = state.painter
size = state.fontMetrics().ascent()
painter.save()
pen = painter.pen()
pen.setWidthF( state.getPixelsPerPt() * 0.5 )
painter.setPen(pen)
try:
points.plotMarker(
painter, state.x + size/2.,
state.y - size/2.,
self.children[0].text, size*0.3)
except ValueError:
pass
painter.restore()
state.x += size
class PartColor(Part):
def __init__(self, children):
try:
self.colorname = children[0].text
except (AttributeError, IndexError):
self.colorname = ''
self.children = children[1:]
def render(self, state):
painter = state.painter
pen = painter.pen()
oldcolor = pen.color()
pen.setColor( qt4.QColor(self.colorname) )
painter.setPen(pen)
Part.render(self, state)
pen.setColor(oldcolor)
painter.setPen(pen)
# a dict of latex commands, the part object they correspond to,
# and the number of arguments
part_commands = {
'^': (PartSuperScript, 1),
'_': (PartSubScript, 1),
r'\italic': (PartItalic, 1),
r'\emph': (PartItalic, 1),
r'\bold': (PartBold, 1),
r'\underline': (PartUnderline, 1),
r'\textbf': (PartBold, 1),
r'\textit': (PartItalic, 1),
r'\font': (PartFont, 2),
r'\size': (PartSize, 2),
r'\frac': (PartFrac, 2),
r'\bar': (PartBar, 1),
r'\overline': (PartBar, 1),
r'\dot': (PartDot, 1),
r'\marker': (PartMarker, 1),
r'\color': (PartColor, 2),
}
# split up latex expression into bits
splitter_re = re.compile(r'''
(
\\[A-Za-z]+[ ]* | # normal latex command
\\[\[\]{}_^] | # escaped special characters
\\\\ | # line end
\{ | # begin block
\} | # end block
\^ | # power
_ # subscript
)
''', re.VERBOSE)
def latexEscape(text):
"""Escape any special characters in LaTex-like code."""
return re.sub(r'([_\^\[\]\{\}\\])', r'\\\1', text)
def makePartList(text):
"""Make list of parts from text"""
parts = []
parents = [parts]
def doAdd(p):
"""Add the part at the correct level."""
parents[-1].append(p)
return p
for p in splitter_re.split(text):
if p[:1] == '\\':
# we may need to drop excess spaces after \foo commands
ps = p.rstrip()
if ps in symbols:
# it will become a symbol, so preserve whitespace
doAdd(ps)
if ps != p:
doAdd(p[len(ps)-len(p):])
else:
# add as possible command, so drop excess whitespace
doAdd(ps)
elif p == '{':
# add a new level
parents.append( doAdd([]) )
elif p == '}':
if len(parents) > 1:
parents.pop()
elif p:
# if not blank, keep it
doAdd(p)
return parts
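# Illustrative sketch, not part of the upstream file: it shows how the tokenizer
# (splitter_re) and makePartList cooperate. The sample string and the described
# output shape are examples only.
def _demo_part_list(text=r'\bold{AB}_{x}'):
    """Return the raw tokens from splitter_re and the nested list from makePartList.

    For the default input the tokens are '\\bold', '{', 'AB', '}', '_', '{',
    'x', '}' and the nested part list is roughly ['\\bold', ['AB'], '_', ['x']].
    """
    tokens = [t for t in splitter_re.split(text) if t]
    parts = makePartList(text)
    return tokens, parts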
def makePartTree(partlist):
"""Make a tree of parts from the part list."""
lines = []
itemlist = []
length = len(partlist)
def addText(text):
"""Try to merge consecutive text items for better rendering."""
if itemlist and isinstance(itemlist[-1], PartText):
itemlist[-1].addText(text)
else:
itemlist.append( PartText(text) )
i = 0
while i < length:
p = partlist[i]
if p == r'\\':
lines.append( Part(itemlist) )
itemlist = []
elif isinstance(p, cbasestr):
if p in symbols:
addText(symbols[p])
elif p in part_commands:
klass, numargs = part_commands[p]
if numargs == 1 and len(partlist) > i+1 and isinstance(partlist[i+1], cbasestr):
# coerce a single argument to a partlist so that things
# like "A^\dagger" render correctly without needing
# curly brackets
partargs = [makePartTree([partlist[i+1]])]
else:
partargs = [makePartTree(k) for k in partlist[i+1:i+numargs+1]]
if (p == '^' or p == '_'):
if len(itemlist) > 0 and (
isinstance(itemlist[-1], PartSubScript) or
isinstance(itemlist[-1], PartSuperScript) or
isinstance(itemlist[-1], PartMultiScript)):
# combine sequences of multiple sub-/superscript parts into
# a MultiScript item so that a single text item can have
# both super and subscript indices
# e.g. X^{(q)}_{i}
if isinstance(itemlist[-1], PartMultiScript):
itemlist.append( klass(partargs) )
else:
itemlist[-1] = PartMultiScript([itemlist[-1], klass(partargs)])
else:
itemlist.append( klass(partargs) )
else:
itemlist.append( klass(partargs) )
i += numargs
else:
addText(p)
else:
itemlist.append( makePartTree(p) )
i += 1
# remaining items
lines.append( Part(itemlist) )
if len(lines) == 1:
# single line, so optimize (itemlist == lines[0] still)
if len(itemlist) == 1:
# try to flatten any excess layers
return itemlist[0]
else:
return lines[0]
else:
return PartLines(lines)
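# Illustrative sketch, not part of the upstream file: it shows the two-stage
# pipeline the renderers below rely on - tokenise the string into a part list,
# then build the Part tree whose render() method does the drawing. The sample
# string is an example only.
def _demo_part_tree(text=r'x^{2} + \italic{y}'):
    """Return the Part tree built for an example string (ready for Part.render)."""
    return makePartTree(makePartList(text))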
class _Renderer:
"""Different renderer types based on this."""
def __init__(
self, painter, font, x, y, text,
alignhorz = -1, alignvert = -1, angle = 0,
usefullheight = False,
doc = None):
self.painter = painter
self.font = font
self.alignhorz = alignhorz
self.alignvert = alignvert
self.angle = angle
self.usefullheight = usefullheight
self.doc = doc
# x and y are the original coordinates
# xi and yi are adjusted for alignment
self.x = self.xi = x
self.y = self.yi = y
self.calcbounds = None
self._initText(text)
def _initText(self, text):
"""Override this to set up renderer with text."""
def ensureInBox(self, minx = -32767, maxx = 32767,
miny = -32767, maxy = 32767, extraspace = False):
"""Adjust position of text so that it is within this box."""
if self.calcbounds is None:
self.getBounds()
cb = self.calcbounds
# add a small amount of extra room if requested
if extraspace:
self.painter.setFont(self.font)
l = FontMetrics(
self.font,
self.painter.device()).height()*0.2
miny += l
# twiddle positions and bounds
if cb[2] > maxx:
dx = cb[2] - maxx
self.xi -= dx
cb[2] -= dx
cb[0] -= dx
if cb[0] < minx:
dx = minx - cb[0]
self.xi += dx
cb[2] += dx
cb[0] += dx
if cb[3] > maxy:
dy = cb[3] - maxy
self.yi -= dy
cb[3] -= dy
cb[1] -= dy
if cb[1] < miny:
dy = miny - cb[1]
self.yi += dy
cb[3] += dy
cb[1] += dy
def getDimensions(self):
"""Get the (w, h) of the bounding box."""
if self.calcbounds is None:
self.getBounds()
cb = self.calcbounds
return (cb[2]-cb[0]+1, cb[3]-cb[1]+1)
def _getWidthHeight(self):
"""Calculate the width and height of rendered text.
Return totalwidth, totalheight, dy
dy is a descent to add, to include in the alignment, if wanted
"""
def getTightBounds(self):
"""Get bounds in form of rotated rectangle."""
largebounds = self.getBounds()
totalwidth, totalheight, dy = self._getWidthHeight()
return RotatedRectangle(
0.5*(largebounds[0]+largebounds[2]),
0.5*(largebounds[1]+largebounds[3]),
totalwidth,
totalheight+dy,
self.angle * math.pi / 180.)
def getBounds(self):
"""Get bounds in standard version."""
if self.calcbounds is not None:
return self.calcbounds
totalwidth, totalheight, dy = self._getWidthHeight()
# in order to work out text position, we rotate a bounding box
# in fact we add two extra points to account for descent if reqd
tw = totalwidth / 2
th = totalheight / 2
coordx = N.array( [-tw, tw, tw, -tw, -tw, tw ] )
coordy = N.array( [ th, th, -th, -th, th+dy, th+dy] )
# rotate angles by theta
theta = -self.angle * (math.pi / 180.)
c = math.cos(theta)
s = math.sin(theta)
newx = coordx*c + coordy*s
newy = coordy*c - coordx*s
# calculate bounding box
newbound = (newx.min(), newy.min(), newx.max(), newy.max())
# use rotated bounding box to find position of start text posn
if self.alignhorz < 0:
xr = ( self.x, self.x+(newbound[2]-newbound[0]) )
self.xi += (newx[0] - newbound[0])
elif self.alignhorz > 0:
xr = ( self.x-(newbound[2]-newbound[0]), self.x )
self.xi += (newx[0] - newbound[2])
else:
xr = ( self.x+newbound[0], self.x+newbound[2] )
self.xi += newx[0]
# y alignment
# adjust y by these values to ensure proper alignment
if self.alignvert < 0:
yr = ( self.y + (newbound[1]-newbound[3]), self.y )
self.yi += (newy[0] - newbound[3])
elif self.alignvert > 0:
yr = ( self.y, self.y + (newbound[3]-newbound[1]) )
self.yi += (newy[0] - newbound[1])
else:
yr = ( self.y+newbound[1], self.y+newbound[3] )
self.yi += newy[0]
self.calcbounds = [xr[0], yr[0], xr[1], yr[1]]
return self.calcbounds
class _StdRenderer(_Renderer):
"""Standard rendering class."""
# expressions in brackets %{{ }}% are evaluated
exprexpansion = re.compile(r'%\{\{(.+?)\}\}%')
def _initText(self, text):
# expand any expressions in the text
delta = 0
for m in self.exprexpansion.finditer(text):
expanded = self._expandExpr(m.group(1))
text = text[:delta+m.start()] + expanded + text[delta+m.end():]
delta += len(expanded) - (m.end()-m.start())
# make internal tree
partlist = makePartList(text)
self.parttree = makePartTree(partlist)
def _expandExpr(self, expr):
"""Expand expression."""
if self.doc is None:
return "*not supported here*"
else:
expr = expr.strip()
try:
comp = self.doc.evaluate.compileCheckedExpression(expr)
return cstr(eval(comp, self.doc.evaluate.context))
except Exception as e:
return latexEscape(cstr(e))
def _getWidthHeight(self):
"""Get size of box around text."""
# work out total width and height
self.painter.setFont(self.font)
# work out height of box, and
# make the bounding box a bit bigger if we want to include descents
state = RenderState(
self.font, self.painter, 0, 0,
self.alignhorz,
actually_render = False)
fm = state.fontMetrics()
if self.usefullheight:
totalheight = fm.ascent()
dy = fm.descent()
else:
if self.alignvert == 0:
# if want vertical centering, better to centre around middle
# of typical letter (i.e. where strike position is)
#totalheight = fm.strikeOutPos()*2
totalheight = fm.boundingRectChar('0').height()
else:
# if top/bottom alignment, better to use maximum letter height
totalheight = fm.ascent()
dy = 0
# work out width
self.parttree.render(state)
totalwidth = state.x
# add number of lines for height
totalheight += fm.height()*(state.maxlines-1)
return totalwidth, totalheight, dy
def render(self):
"""Render the text."""
if self.calcbounds is None:
self.getBounds()
state = RenderState(
self.font, self.painter,
self.xi, self.yi,
self.alignhorz)
# if the text is rotated, change the coordinate frame
if self.angle != 0:
self.painter.save()
self.painter.translate( qt4.QPointF(state.x, state.y) )
self.painter.rotate(self.angle)
state.x = 0
state.y = 0
# actually paint the string
self.painter.setFont(self.font)
self.parttree.render(state)
# restore coordinate frame if text was rotated
if self.angle != 0:
self.painter.restore()
# caller might want this information
return self.calcbounds
class _MmlRenderer(_Renderer):
"""MathML renderer."""
def _initText(self, text):
"""Setup MML document and draw it in recording paint device."""
self.error = ''
self.size = qt4.QSize(1, 1)
if not mmlsupport:
self.mmldoc = None
self.error = 'Error: MathML support not built\n'
return
self.mmldoc = doc = qtmml.QtMmlDocument()
try:
self.mmldoc.setContent(text)
except ValueError as e:
self.mmldoc = None
self.error = ('Error interpreting MathML: %s\n' %
cstr(e))
return
# this is pretty horrible :-(
# We write the mathmml document to a RecordPaintDevice device
# at the same DPI as the screen, because the MML code breaks
# for other DPIs. We then repaint the output to the real
# device, scaling to make the size correct.
screendev = qt4.QApplication.desktop()
self.record = recordpaint.RecordPaintDevice(
1024, 1024, screendev.logicalDpiX(), screendev.logicalDpiY())
rpaint = qt4.QPainter(self.record)
# painting code relies on these attributes of the painter
rpaint.pixperpt = screendev.logicalDpiY() / 72.
rpaint.scaling = 1.0
# Upscale any drawing by this factor, then scale back when
# drawing. We have to do this to get consistent output at
# different zoom factors (I hate this code).
upscale = 5.
doc.setFontName( qtmml.QtMmlWidget.NormalFont, self.font.family() )
ptsize = self.font.pointSizeF()
if ptsize < 0:
ptsize = self.font.pixelSize() / self.painter.pixperpt
ptsize /= self.painter.scaling
doc.setBaseFontPointSize(ptsize * upscale)
# the output will be painted finally scaled
self.drawscale = (
self.painter.scaling * self.painter.dpi / screendev.logicalDpiY()
/ upscale )
self.size = doc.size() * self.drawscale
doc.paint(rpaint, qt4.QPoint(0, 0))
rpaint.end()
def _getWidthHeight(self):
return self.size.width(), self.size.height(), 0
def render(self):
"""Render the text."""
if self.calcbounds is None:
self.getBounds()
p = self.painter
p.save()
if self.mmldoc is not None:
p.translate(self.xi, self.yi)
p.rotate(self.angle)
# is drawn from bottom of box, not top
p.translate(0, -self.size.height())
p.scale(self.drawscale, self.drawscale)
self.record.play(p)
else:
# display an error - must be a better way to do this
p.setFont(qt4.QFont())
p.setPen(qt4.QPen(qt4.QColor("red")))
p.drawText( qt4.QRectF(self.xi, self.yi, 200, 200),
qt4.Qt.AlignLeft | qt4.Qt.AlignTop |
qt4.Qt.TextWordWrap,
self.error )
p.restore()
return self.calcbounds
# identify mathml text
mml_re = re.compile(r'^\s*<math.*</math\s*>\s*$', re.DOTALL)
def Renderer(painter, font, x, y, text,
alignhorz = -1, alignvert = -1, angle = 0,
usefullheight = False,
doc = None):
"""Return an appropriate Renderer object depending on the text.
This looks like a class name, because it was a class originally.
painter is the painter to draw on
font is the starting font to use
x and y are the x and y positions to draw the text at
alignhorz = (-1, 0, 1) for (left, centre, right) alignment
alignvert = (-1, 0, 1) for (above, centre, below) alignment
angle is the angle to draw the text at
usefullheight means include descenders in calculation of height
of text
doc is a Document for evaluating any expressions
alignment is in the painter frame, not the text frame
"""
if mml_re.match(text):
r = _MmlRenderer
else:
r = _StdRenderer
return r(
painter, font, x, y, text,
alignhorz=alignhorz, alignvert=alignvert,
angle=angle, usefullheight=usefullheight,
doc=doc
)
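# Illustrative sketch, not part of the upstream file: a minimal way to call the
# Renderer factory above. It assumes `painter` is an already-active painter of
# the kind veusz hands to its drawing code (a qt4.QPainter carrying veusz's
# extra scaling attributes); the font family, size, position and sample text
# are arbitrary choices for the example.
def _demo_render_label(painter, text=r'y = x^{2} + \alpha'):
    """Draw `text` centred on (100, 100) using the given active painter."""
    font = qt4.QFont('Times New Roman', 14)
    r = Renderer(painter, font, 100, 100, text,
                 alignhorz=0, alignvert=0, angle=0)
    return r.render()   # paints the text and returns its bounds [x0, y0, x1, y1]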
|
OpenReliability/OpenReliability
|
veusz/utils/textrender.py
|
Python
|
gpl-2.0
| 48,574
|
[
"Bowtie"
] |
c8411a57c3dd75e42997913d67167a969727ad74d424b8586fb0dadb868a0a15
|
#!/usr/bin/env python
from __future__ import division
__author__ = "Marek Rudnicki"
import numpy as np
from neuron import h
import waves as wv
def record_voltages(secs):
vecs = []
for sec in secs:
vec = h.Vector()
vec.record(sec(0.5)._ref_v)
vecs.append(vec)
return vecs
def plot_voltages(fs, vecs):
import biggles
all_values = np.concatenate( vecs )
hi = all_values.max()
lo = all_values.min()
plot = biggles.Table(len(vecs), 1)
plot.cellpadding = 0
plot.cellspacing = 0
for i,vec in enumerate(vecs):
p = biggles.Plot()
p.add( biggles.Curve(wv.t(fs, vec), vec) )
p.yrange = (lo, hi)
plot[i,0] = p
p.add( biggles.LineX(0) )
p.add( biggles.Label(0, (hi+lo)/2, "%.2f mV" % (hi-lo), halign='left') )
p.add( biggles.LineY(lo) )
p.add( biggles.Label((len(vec)/fs/2), lo, "%.1f ms" % (1000*len(vec)/fs), valign='bottom') )
return plot
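# Illustrative sketch, not part of the upstream file: it shows how
# record_voltages() is meant to be used. The single-section HH cell, the 5 ms
# run and the sampling-rate convention (samples per second, i.e. 1000/h.dt with
# dt in milliseconds, matching the labels in plot_voltages) are assumptions made
# for the example.
def _demo_record_soma(tstop=5.0):
    """Record the soma voltage of a toy HH cell and return (fs, voltage vectors)."""
    soma = h.Section()
    soma.insert('hh')                 # standard Hodgkin-Huxley channels
    vecs = record_voltages([soma])    # one h.Vector per section
    h.finitialize(-65)
    while h.t < tstop:
        h.fadvance()
    fs = 1000.0 / h.dt                # assumed Hz
    return fs, vecs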
def main():
pass
if __name__ == "__main__":
import biggles
main()
|
timtammittee/thorns
|
thorns/nrn.py
|
Python
|
gpl-3.0
| 1,042
|
[
"NEURON"
] |
436052c78bdc7f84d350de92bf1e727e3c456b3a7bf433a5a6ec9cbb0795b156
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ldap
from nose.plugins.attrib import attr
from nose.tools import assert_true, assert_equal, assert_false
import desktop.conf
from desktop.lib.test_utils import grant_access
from desktop.lib.django_test_util import make_logged_in_client
from django.conf import settings
from django.contrib.auth.models import User, Group
from django.core.urlresolvers import reverse
from useradmin.models import LdapGroup, UserProfile, get_profile
from hadoop import pseudo_hdfs4
from views import sync_ldap_users, sync_ldap_groups, import_ldap_users, import_ldap_groups, \
add_ldap_users, add_ldap_groups, sync_ldap_users_groups
import ldap_access
from tests import LdapTestConnection, reset_all_groups, reset_all_users
def test_useradmin_ldap_user_group_membership_sync():
settings.MIDDLEWARE_CLASSES.append('useradmin.middleware.LdapSynchronizationMiddleware')
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
try:
# Import curly who is part of TestUsers and Test Administrators
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=False, import_by_dn=False)
# Set a password so that we can login
user = User.objects.get(username='curly')
user.set_password('test')
user.save()
# Should have 0 groups
assert_equal(0, user.groups.all().count())
# Make an authenticated request as curly so that we can see call middleware.
c = make_logged_in_client('curly', 'test', is_superuser=False)
grant_access("curly", "test", "useradmin")
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 3 groups now. 2 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
# Now remove a group and try again.
old_group = ldap_access.CACHED_LDAP_CONN._instance.users['curly']['groups'].pop()
# Make an authenticated request as curly so that we can see call middleware.
response = c.get('/useradmin/users')
# Refresh user groups
user = User.objects.get(username='curly')
# Should have 2 groups now. 1 from LDAP and 1 from 'grant_access' call.
assert_equal(3, user.groups.all().count(), user.groups.all())
finally:
settings.MIDDLEWARE_CLASSES.remove('useradmin.middleware.LdapSynchronizationMiddleware')
def test_useradmin_ldap_suboordinate_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 4)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 3)
assert_equal(Group.objects.all().count(), 2)
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 2)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
assert_equal(test_users.user_set.all().count(), 3)
ldap_access.CACHED_LDAP_CONN.remove_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='moe').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_user_group_for_test('uid=moe,ou=People,dc=example,dc=com', 'TestUsers')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 3)
assert_equal(User.objects.get(username='moe').groups.all().count(), 1)
# Import all members of TestUsers and not members of subordinate groups (even though specified)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'TestUsers', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='TestUsers')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Nested group import
# First without recursive import, then with.
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 0, nested_group.user_set.all())
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
nested_groups = Group.objects.get(name='NestedGroups')
nested_group = Group.objects.get(name='NestedGroup')
assert_true(LdapGroup.objects.filter(group=nested_groups).exists())
assert_true(LdapGroup.objects.filter(group=nested_group).exists())
assert_equal(nested_groups.user_set.all().count(), 0, nested_groups.user_set.all())
assert_equal(nested_group.user_set.all().count(), 1, nested_group.user_set.all())
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_suboordinate_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test old subgroups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("suboordinate"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 3)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_nested_posix_group_integration():
reset_all_users()
reset_all_groups()
reset = []
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Test nested groups
reset.append(desktop.conf.LDAP.SUBGROUPS.set_for_testing("nested"))
try:
# Import groups only
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=False, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
# Import all members of TestUsers
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Should import a group, but will only sync already-imported members
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'Test Administrators', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(User.objects.all().count(), 2, User.objects.all())
assert_equal(Group.objects.all().count(), 2, Group.objects.all())
test_admins = Group.objects.get(name='Test Administrators')
assert_equal(test_admins.user_set.all().count(), 1)
larry = User.objects.get(username='lårry')
assert_equal(test_admins.user_set.all()[0].username, larry.username)
# Only sync already imported
ldap_access.CACHED_LDAP_CONN.remove_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 1)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 0)
# Import missing user
ldap_access.CACHED_LDAP_CONN.add_posix_user_group_for_test('posix_person', 'PosixGroup')
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_equal(test_users.user_set.all().count(), 2)
assert_equal(User.objects.get(username='posix_person').groups.all().count(), 1)
# Import all members of PosixGroup and members of subgroups (there should be no subgroups)
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'PosixGroup', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Import all members of NestedPosixGroups and members of subgroups
reset_all_users()
reset_all_groups()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'NestedPosixGroups', import_members=True, import_members_recursive=True, sync_users=True, import_by_dn=False)
test_users = Group.objects.get(name='NestedPosixGroups')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 0)
test_users = Group.objects.get(name='PosixGroup')
assert_true(LdapGroup.objects.filter(group=test_users).exists())
assert_equal(test_users.user_set.all().count(), 2)
# Make sure Hue groups with naming collisions don't get marked as LDAP groups
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
hue_group = Group.objects.create(name='OtherGroup')
hue_group.user_set.add(hue_user)
hue_group.save()
import_ldap_groups(ldap_access.CACHED_LDAP_CONN, 'OtherGroup', import_members=False, import_members_recursive=False, sync_users=True, import_by_dn=False)
assert_false(LdapGroup.objects.filter(group=hue_group).exists())
assert_true(hue_group.user_set.filter(username=hue_user.username).exists())
finally:
for finish in reset:
finish()
def test_useradmin_ldap_user_integration():
done = []
try:
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
# Try importing a user
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'lårry', sync_groups=False, import_by_dn=False)
larry = User.objects.get(username='lårry')
assert_true(larry.first_name == 'Larry')
assert_true(larry.last_name == 'Stooge')
assert_true(larry.email == 'larry@stooges.com')
assert_true(get_profile(larry).creation_method == str(UserProfile.CreationMethod.EXTERNAL))
# Should be a noop
sync_ldap_users(ldap_access.CACHED_LDAP_CONN)
sync_ldap_groups(ldap_access.CACHED_LDAP_CONN)
assert_equal(User.objects.all().count(), 1)
assert_equal(Group.objects.all().count(), 0)
# Make sure that if a Hue user already exists with a naming collision, we
# won't overwrite any of that user's information.
hue_user = User.objects.create(username='otherguy', first_name='Different', last_name='Guy')
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'otherguy', sync_groups=False, import_by_dn=False)
hue_user = User.objects.get(username='otherguy')
assert_equal(get_profile(hue_user).creation_method, str(UserProfile.CreationMethod.HUE))
assert_equal(hue_user.first_name, 'Different')
# Try importing a user and sync groups
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'curly', sync_groups=True, import_by_dn=False)
curly = User.objects.get(username='curly')
assert_equal(curly.first_name, 'Curly')
assert_equal(curly.last_name, 'Stooge')
assert_equal(curly.email, 'curly@stooges.com')
assert_equal(get_profile(curly).creation_method, str(UserProfile.CreationMethod.EXTERNAL))
assert_equal(2, curly.groups.all().count(), curly.groups.all())
reset_all_users()
reset_all_groups()
# Test import case sensitivity
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Lårry', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Lårry').exists())
assert_true(User.objects.filter(username='lårry').exists())
# Test lower case
User.objects.filter(username__iexact='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_true(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_true(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
User.objects.filter(username='Rock').delete()
import_ldap_users(ldap_access.CACHED_LDAP_CONN, 'Rock', sync_groups=False, import_by_dn=False)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
finally:
for finish in done:
finish()
def test_add_ldap_users():
done = []
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'], response)
# Test ignore case
done.append(desktop.conf.LDAP.IGNORE_USERNAME_CASE.set_for_testing(True))
User.objects.filter(username='moe').delete()
assert_false(User.objects.filter(username='Moe').exists())
assert_false(User.objects.filter(username='moe').exists())
response = c.post(URL, dict(username_pattern='Moe', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Moe').exists())
assert_true(User.objects.filter(username='moe').exists())
# Test lower case
done.append(desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.set_for_testing(True))
User.objects.filter(username__iexact='Rock').delete()
assert_false(User.objects.filter(username='Rock').exists())
assert_false(User.objects.filter(username='rock').exists())
response = c.post(URL, dict(username_pattern='rock', password1='test', password2='test'))
assert_true('Location' in response, response)
assert_true('/useradmin/users' in response['Location'], response)
assert_false(User.objects.filter(username='Rock').exists())
assert_true(User.objects.filter(username='rock').exists())
# Test regular with spaces (should fail)
response = c.post(URL, dict(username_pattern='user with space', password1='test', password2='test'))
assert_true("Username must not contain whitespaces and ':'" in response.context['form'].errors['username_pattern'][0], response)
# Test dn with spaces in username and dn (should fail)
response = c.post(URL, dict(username_pattern='uid=user with space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true("There was a problem with some of the LDAP information" in response.content, response)
assert_true("Username must not contain whitespaces" in response.content, response)
# Test dn with spaces in dn, but not username (should succeed)
response = c.post(URL, dict(username_pattern='uid=user without space,ou=People,dc=example,dc=com', password1='test', password2='test', dn=True))
assert_true(User.objects.filter(username='spaceless').exists())
finally:
for finish in done:
finish()
def test_add_ldap_groups():
URL = reverse(add_ldap_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client(username='test', is_superuser=True)
assert_true(c.get(URL))
response = c.post(URL, dict(groupname_pattern='TestUsers'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'])
# Test with space
response = c.post(URL, dict(groupname_pattern='Test Administrators'))
assert_true('Location' in response, response)
assert_true('/useradmin/groups' in response['Location'], response)
response = c.post(URL, dict(groupname_pattern='toolongnametoolongnametoolongnametoolongnametoolongnametoolongnametoolongnametoolongname'))
assert_true('Ensure this value has at most 80 characters' in response.context['form'].errors['groupname_pattern'][0], response)
# Test wild card
response = c.post(URL, dict(groupname_pattern='*r*'))
assert_true('/useradmin/groups' in response['Location'], response)
def test_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
c = make_logged_in_client('test', is_superuser=True)
assert_true(c.get(URL))
assert_true(c.post(URL))
def test_ldap_exception_handling():
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
class LdapTestConnectionError(LdapTestConnection):
def find_users(self, user, find_by_dn=False):
raise ldap.LDAPError('No such object')
ldap_access.CACHED_LDAP_CONN = LdapTestConnectionError()
c = make_logged_in_client('test', is_superuser=True)
response = c.post(reverse(add_ldap_users), dict(username_pattern='moe', password1='test', password2='test'), follow=True)
assert_true('There was an error when communicating with LDAP' in response.content, response)
@attr('requires_hadoop')
def test_ensure_home_directory_add_ldap_users():
try:
URL = reverse(add_ldap_users)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
assert_true(c.get(URL))
response = c.post(URL, dict(username_pattern='moe', password1='test', password2='test'))
assert_true('/useradmin/users' in response['Location'])
assert_false(cluster.fs.exists('/user/moe'))
# Try same thing with home directory creation.
response = c.post(URL, dict(username_pattern='curly', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
response = c.post(URL, dict(username_pattern='bad_name', password1='test', password2='test'))
assert_true('Could not' in response.context['form'].errors['username_pattern'][0])
assert_false(cluster.fs.exists('/user/bad_name'))
# See if moe, who did not ask for his home directory, has a home directory.
assert_false(cluster.fs.exists('/user/moe'))
# Try wild card now
response = c.post(URL, dict(username_pattern='*rr*', password1='test', password2='test', ensure_home_directory=True))
assert_true('/useradmin/users' in response['Location'])
assert_true(cluster.fs.exists('/user/curly'))
assert_true(cluster.fs.exists(u'/user/lårry'))
assert_false(cluster.fs.exists('/user/otherguy'))
finally:
# Clean up
if cluster.fs.exists('/user/curly'):
cluster.fs.rmtree('/user/curly')
if cluster.fs.exists(u'/user/lårry'):
cluster.fs.rmtree(u'/user/lårry')
if cluster.fs.exists('/user/otherguy'):
cluster.fs.rmtree('/user/otherguy')
@attr('requires_hadoop')
def test_ensure_home_directory_sync_ldap_users_groups():
URL = reverse(sync_ldap_users_groups)
reset_all_users()
reset_all_groups()
# Set up LDAP tests to use a LdapTestConnection instead of an actual LDAP connection
ldap_access.CACHED_LDAP_CONN = LdapTestConnection()
cluster = pseudo_hdfs4.shared_cluster()
c = make_logged_in_client(cluster.superuser, is_superuser=True)
cluster.fs.setuser(cluster.superuser)
c.post(reverse(add_ldap_users), dict(username_pattern='curly', password1='test', password2='test'))
assert_false(cluster.fs.exists('/user/curly'))
assert_true(c.post(URL, dict(ensure_home_directory=True)))
assert_true(cluster.fs.exists('/user/curly'))
|
yongshengwang/builthue
|
apps/useradmin/src/useradmin/test_ldap_deprecated.py
|
Python
|
apache-2.0
| 31,025
|
[
"MOE"
] |
62f6073a33768991995fa59be9a97a52a21a2cd408f772e917922dc76bda9374
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import os
import numpy as np
from psi4 import core
from psi4.driver import p4util
from psi4.driver.p4util.exceptions import *
def ah_iteration(mcscf_obj, tol=1e-3, max_iter=15, lindep=1e-14, print_micro=True):
"""
Solve the generalized eigenvalue problem:
    | 0   g.T | | 1/l |     | 1/l |
    | g   H/l | |  X  | = e |  X  |
Where g is the gradient, H is the orbital Hessian, X is our orbital update step,
and l is the eigenvalue.
In some ways this is the subspace reduction of the full MCSCF Hessian where the
CC part has been solved exactly. When this occurs the OC and CO elements collapse
to the above and the CC Hessian becomes diagonally dominant.
We can solve this through Davidson iterations where we condition the edges. It's the
Pulay equations all over again, just iterative.
Watch out for lambdas that are zero. Looking for the lambda that is ~1.
"""
# Unpack information
orb_grad = mcscf_obj.gradient()
precon = mcscf_obj.H_approx_diag()
approx_step = mcscf_obj.approx_solve()
orb_grad_ssq = orb_grad.sum_of_squares()
# Gears
min_lambda = 0.3
converged = False
warning_neg = False
warning_mult = False
fullG = np.zeros((max_iter + 2, max_iter + 2))
fullS = np.zeros((max_iter + 2, max_iter + 2))
fullS[np.diag_indices_from(fullS)] = 1
guesses = []
sigma_list = []
guesses.append(approx_step)
sigma_list.append(mcscf_obj.compute_Hk(approx_step))
if print_micro:
core.print_out("\n Eigenvalue Rel dE dX \n")
# Run Davidson look for lambda ~ 1
old_val = 0
for microi in range(1, max_iter + 1):
# Gradient
fullG[0, microi] = guesses[-1].vector_dot(orb_grad)
for i in range(microi):
fullG[i + 1, microi] = guesses[-1].vector_dot(sigma_list[i])
fullS[i + 1, microi] = guesses[-1].vector_dot(guesses[i])
fullG[microi] = fullG[:, microi]
fullS[microi] = fullS[:, microi]
wlast = old_val
# Slice out relevant S and G
S = fullS[:microi + 1, :microi + 1]
G = fullG[:microi + 1, :microi + 1]
# Solve Gv = lSv
v, L = np.linalg.eigh(S)
mask = v > (np.min(np.abs(v)) * 1.e-10)
invL = L[:, mask] * (v[mask]**-0.5)
# Solve in S basis, rotate back
evals, evecs = np.linalg.eigh(np.dot(invL.T, G).dot(invL))
vectors = np.dot(invL, evecs)
# Figure out the right root to follow
if np.sum(np.abs(vectors[0]) > min_lambda) == 0:
raise PsiException("Augmented Hessian: Could not find the correct root!\n"\
"Try starting AH when the MCSCF wavefunction is more converged.")
if np.sum(np.abs(vectors[0]) > min_lambda) > 1 and not warning_mult:
core.print_out(" Warning! Multiple eigenvectors found to follow. Following closest to \lambda = 1.\n")
warning_mult = True
idx = (np.abs(1 - np.abs(vectors[0]))).argmin()
lam = abs(vectors[0, idx])
subspace_vec = vectors[1:, idx]
# Negative roots should go away?
if idx > 0 and evals[idx] < -5.0e-6 and not warning_neg:
core.print_out(' Warning! AH might follow negative eigenvalues!\n')
warning_neg = True
diff_val = evals[idx] - old_val
old_val = evals[idx]
new_guess = guesses[0].clone()
new_guess.zero()
for num, c in enumerate(subspace_vec / lam):
new_guess.axpy(c, guesses[num])
# Build estimated sigma vector
new_dx = sigma_list[0].clone()
new_dx.zero()
for num, c in enumerate(subspace_vec):
new_dx.axpy(c, sigma_list[num])
# Consider restraints
new_dx.axpy(lam, orb_grad)
new_dx.axpy(old_val * lam, new_guess)
norm_dx = (new_dx.sum_of_squares() / orb_grad_ssq)**0.5
if print_micro:
core.print_out(" AH microiter %2d % 18.12e % 6.4e % 6.4e\n" % (microi, evals[idx],
diff_val / evals[idx], norm_dx))
if abs(old_val - wlast) < tol and norm_dx < (tol**0.5):
converged = True
break
# Apply preconditioner
tmp = precon.clone()
val = tmp.clone()
val.set(evals[idx])
tmp.subtract(val)
new_dx.apply_denominator(tmp)
guesses.append(new_dx)
sigma_list.append(mcscf_obj.compute_Hk(new_dx))
if print_micro and converged:
core.print_out("\n")
# core.print_out(" AH converged! \n\n")
#if not converged:
# core.print_out(" !Warning. Augmented Hessian did not converge.\n")
new_guess.scale(-1.0)
return converged, microi, new_guess
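# Illustrative sketch, not part of the upstream file: it builds the small
# bordered matrix from the docstring densely for a toy gradient and Hessian and
# picks the root whose leading component is closest to one, mirroring what the
# Davidson loop above does iteratively on large systems. The dimensions, scaling
# and random test data are assumptions made for the example.
def _dense_augmented_hessian_example(n=4, seed=0):
    rng = np.random.RandomState(seed)
    g = 1e-2 * rng.randn(n)                       # toy orbital gradient
    A = rng.randn(n, n)
    H = np.dot(A, A.T) + np.eye(n)                # toy symmetric positive-definite Hessian
    aug = np.zeros((n + 1, n + 1))
    aug[0, 1:] = g
    aug[1:, 0] = g
    aug[1:, 1:] = H
    evals, evecs = np.linalg.eigh(aug)
    idx = np.argmin(np.abs(1.0 - np.abs(evecs[0])))   # follow the root with lambda ~ 1
    step = evecs[1:, idx] / evecs[0, idx]             # the orbital update X
    return evals[idx], step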
|
amjames/psi4
|
psi4/driver/procrouting/mcscf/augmented_hessian.py
|
Python
|
lgpl-3.0
| 5,801
|
[
"Psi4"
] |
ffcd6da7f11f012c609c6684836f1363755f6ef4ec98fdfb06c350df8e6383b6
|
"""
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
"""Check consistency on dataset iris."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
"""Check consistency on dataset boston house prices."""
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
"""Regression models should not have a classes_ attribute."""
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
"""Predict probabilities."""
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importance(name, X, y):
"""Check variable importances."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
assert_true(0 < X_new.shape[1] < X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importance, name, X, y
def check_oob_score(name, X, y, n_estimators=20):
"""Check that oob prediction is a good estimation of the generalization
error."""
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
"""Check that base trees can be grid-searched."""
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
"""Check pickability."""
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
"""Check estimators on multi-output problems."""
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
"""Test that n_classes_ and classes_ have proper shape."""
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
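    # _y stacks y and 2 * y column-wise, giving a two-output target whose outputs
    # have classes {-1, 1} and {-2, 2} respectively (checked below).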
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning a dense array.
'''
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
'''
Test that the `sparse_output` parameter of RandomTreesEmbedding
works by returning the same array for both argument
values.
'''
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable,
# even after being projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
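    # In the string built above, nodes appear in depth-first order: each split is
    # written as "feature,int(threshold)/" and each leaf as "-", so
    # "0,1/0,0/--0,2/--" is the balanced tree that splits X_0 at 1 first and then
    # at 0 and 2 in its two subtrees.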
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
X[:, 0] = rng.randint(0, 2, 1000)
X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
"""Test precedence of max_leaf_nodes over max_depth. """
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
"""Test if leaves contain more than leaf_count training examples"""
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
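            # bincount with weights sums the sample weights that landed in each node id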
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_memory_layout(name, dtype):
"""Check that it works no matter the memory layout"""
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
if __name__ == "__main__":
import nose
nose.runmodule()
|
eickenberg/scikit-learn
|
sklearn/ensemble/tests/test_forest.py
|
Python
|
bsd-3-clause
| 22,872
|
[
"Brian"
] |
02ce38d1f4b2c8fb0c2288832052f977c25a3a8e75eaa5cd37b2d170e888b1f5
|
#!/usr/bin/env python
import unittest
import warnings
import numpy as np
from pymatgen.core.lattice import Lattice
from pymatgen.core.operations import SymmOp
from pymatgen.symmetry.groups import PointGroup, SpaceGroup, _get_symm_data
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__date__ = "4/10/14"
class PointGroupTest(unittest.TestCase):
def test_order(self):
order = {"mmm": 8, "432": 24, "-6m2": 12}
for k, v in order.items():
pg = PointGroup(k)
self.assertEqual(order[k], len(pg.symmetry_ops))
def test_get_orbit(self):
pg = PointGroup("mmm")
self.assertEqual(len(pg.get_orbit([0.1, 0.1, 0.1])), 8)
self.assertEqual(len(pg.get_orbit([0, 0, 0.1])), 2)
self.assertEqual(len(pg.get_orbit([1.2, 1.2, 1])), 8)
def test_is_sub_super_group(self):
with warnings.catch_warnings() as w:
warnings.simplefilter("ignore")
pgmmm = PointGroup("mmm")
pgmm2 = PointGroup("mm2")
pg222 = PointGroup("222")
pg4 = PointGroup("4")
self.assertTrue(pgmmm.is_supergroup(pgmm2))
self.assertTrue(pgmm2.is_subgroup(pgmmm))
self.assertTrue(pgmmm.is_supergroup(pg222))
self.assertFalse(pgmmm.is_supergroup(pg4))
pgm3m = PointGroup("m-3m")
pg6mmm = PointGroup("6/mmm")
pg3m = PointGroup("-3m")
# TODO: Fix the test below.
# self.assertTrue(pg3m.is_subgroup(pgm3m))
self.assertTrue(pg3m.is_subgroup(pg6mmm))
self.assertFalse(pgm3m.is_supergroup(pg6mmm))
class SpaceGroupTest(unittest.TestCase):
def test_renamed_e_symbols(self):
sg = SpaceGroup.from_int_number(64)
assert sg.symbol == "Cmce"
for sym, num in (
("Aem2", 39),
("Aea2", 41),
("Cmce", 64),
("Cmme", 67),
("Ccce", 68),
):
assert SpaceGroup(sym).int_number == num
def test_abbrev_symbols(self):
sg = SpaceGroup("P2/c")
self.assertEqual(sg.int_number, 13)
sg = SpaceGroup("R-3mH")
self.assertEqual(sg.int_number, 166)
def test_attr(self):
sg = SpaceGroup("Fm-3m")
self.assertEqual(sg.full_symbol, "F4/m-32/m")
self.assertEqual(sg.point_group, "m-3m")
def test_point_group_is_set(self):
for i in range(1, 231):
sg = SpaceGroup.from_int_number(i)
self.assertTrue(hasattr(sg, "point_group"))
for symbol in _get_symm_data("space_group_encoding"):
sg = SpaceGroup(symbol)
self.assertTrue(hasattr(sg, "point_group"))
def test_full_symbols(self):
sg = SpaceGroup("P2/m2/m2/m")
self.assertEqual(sg.symbol, "Pmmm")
def test_order_symm_ops(self):
for name in SpaceGroup.SG_SYMBOLS:
sg = SpaceGroup(name)
self.assertEqual(len(sg.symmetry_ops), sg.order)
def test_get_settings(self):
self.assertEqual({"Fm-3m(a-1/4,b-1/4,c-1/4)", "Fm-3m"}, SpaceGroup.get_settings("Fm-3m"))
self.assertEqual(
{
"Pmmn",
"Pmnm:1",
"Pnmm:2",
"Pmnm:2",
"Pnmm",
"Pnmm:1",
"Pmmn:1",
"Pmnm",
"Pmmn:2",
},
SpaceGroup.get_settings("Pmmn"),
)
self.assertEqual(
{"Pnmb", "Pman", "Pncm", "Pmna", "Pcnm", "Pbmn"},
SpaceGroup.get_settings("Pmna"),
)
def test_crystal_system(self):
sg = SpaceGroup("R-3c")
self.assertEqual(sg.crystal_system, "trigonal")
sg = SpaceGroup("R-3cH")
self.assertEqual(sg.crystal_system, "trigonal")
def test_get_orbit(self):
sg = SpaceGroup("Fm-3m")
p = np.random.randint(0, 100 + 1, size=(3,)) / 100
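        # A point can have at most sg.order symmetry-equivalent images; points on
        # special positions (fixed by some operations) give strictly smaller
        # orbits, hence the <= check below.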
self.assertLessEqual(len(sg.get_orbit(p)), sg.order)
def test_is_compatible(self):
cubic = Lattice.cubic(1)
hexagonal = Lattice.hexagonal(1, 2)
rhom = Lattice.rhombohedral(3, 80)
tet = Lattice.tetragonal(1, 2)
ortho = Lattice.orthorhombic(1, 2, 3)
sg = SpaceGroup("Fm-3m")
self.assertTrue(sg.is_compatible(cubic))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3m:H")
self.assertFalse(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(hexagonal))
sg = SpaceGroup("R-3m:R")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("Pnma")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P12/c1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup("P-1")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertTrue(sg.is_compatible(rhom))
self.assertTrue(sg.is_compatible(hexagonal))
sg = SpaceGroup("Pmmn:2")
self.assertTrue(sg.is_compatible(cubic))
self.assertTrue(sg.is_compatible(tet))
self.assertTrue(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertFalse(sg.is_compatible(hexagonal))
sg = SpaceGroup.from_int_number(165)
self.assertFalse(sg.is_compatible(cubic))
self.assertFalse(sg.is_compatible(tet))
self.assertFalse(sg.is_compatible(ortho))
self.assertFalse(sg.is_compatible(rhom))
self.assertTrue(sg.is_compatible(hexagonal))
def test_symmops(self):
sg = SpaceGroup("Pnma")
op = SymmOp.from_rotation_and_translation([[1, 0, 0], [0, -1, 0], [0, 0, -1]], [0.5, 0.5, 0.5])
self.assertIn(op, sg.symmetry_ops)
def test_other_settings(self):
sg = SpaceGroup("Pbnm")
self.assertEqual(sg.int_number, 62)
self.assertEqual(sg.order, 8)
self.assertRaises(ValueError, SpaceGroup, "hello")
def test_subgroup_supergroup(self):
with warnings.catch_warnings() as w:
warnings.simplefilter("ignore")
self.assertTrue(SpaceGroup("Pma2").is_subgroup(SpaceGroup("Pccm")))
self.assertFalse(SpaceGroup.from_int_number(229).is_subgroup(SpaceGroup.from_int_number(230)))
def test_hexagonal(self):
sgs = [146, 148, 155, 160, 161, 166, 167]
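        # These international numbers are the rhombohedral space groups, which
        # have both a hexagonal ("H") and a rhombohedral setting; with
        # hexagonal=False the returned symbol should not carry the "H" suffix.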
for sg in sgs:
s = SpaceGroup.from_int_number(sg, hexagonal=False)
self.assertTrue(not s.symbol.endswith("H"))
if __name__ == "__main__":
unittest.main()
|
davidwaroquiers/pymatgen
|
pymatgen/symmetry/tests/test_groups.py
|
Python
|
mit
| 7,233
|
[
"pymatgen"
] |
45676f89281262a6655f5d9bf58249e060855f4b41c68292c5c96cb09b05f021
|
# -*- coding: utf-8 -*-
"""
Unit tests for instructor.api methods.
"""
import datetime
import functools
import io
import json
import random
import shutil
import tempfile
import ddt
import pytz
from django.conf import settings
from django.contrib.auth.models import User
from django.core import mail
from django.core.files.uploadedfile import SimpleUploadedFile
from django.core.urlresolvers import reverse as django_reverse
from django.http import HttpRequest, HttpResponse
from django.test import RequestFactory, TestCase
from django.test.utils import override_settings
from django.utils.timezone import utc
from django.utils.translation import ugettext as _
from mock import Mock, patch
from nose.plugins.attrib import attr
from nose.tools import raises
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import UsageKey
import lms.djangoapps.instructor.views.api
import lms.djangoapps.instructor_task.api
from bulk_email.models import BulkEmailFlag, CourseEmail, CourseEmailTemplate
from certificates.models import CertificateStatuses
from certificates.tests.factories import GeneratedCertificateFactory
from course_modes.models import CourseMode
from courseware.models import StudentFieldOverride, StudentModule
from courseware.tests.factories import (
BetaTesterFactory,
GlobalStaffFactory,
InstructorFactory,
StaffFactory,
UserProfileFactory
)
from courseware.tests.helpers import LoginEnrollmentTestCase
from django_comment_common.models import FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
from lms.djangoapps.instructor.tests.utils import FakeContentTask, FakeEmail, FakeEmailInfo
from lms.djangoapps.instructor.views.api import (
_split_input_list,
common_exceptions_400,
generate_unique_password,
require_finance_admin
)
from lms.djangoapps.instructor_task.api_helper import (
AlreadyRunningError,
QueueConnectionError,
generate_already_running_error_message
)
from openedx.core.djangoapps.course_groups.cohorts import set_course_cohorted
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.djangoapps.site_configuration.tests.mixins import SiteMixin
from openedx.core.lib.xblock_utils import grade_histogram
from shoppingcart.models import (
Coupon,
CouponRedemption,
CourseRegistrationCode,
CourseRegistrationCodeInvoiceItem,
Invoice,
InvoiceTransaction,
Order,
PaidCourseRegistration,
RegistrationCodeRedemption
)
from shoppingcart.pdf import PDFInvoice
from student.models import (
ALLOWEDTOENROLL_TO_ENROLLED,
ALLOWEDTOENROLL_TO_UNENROLLED,
ENROLLED_TO_ENROLLED,
ENROLLED_TO_UNENROLLED,
UNENROLLED_TO_ALLOWEDTOENROLL,
UNENROLLED_TO_ENROLLED,
UNENROLLED_TO_UNENROLLED,
CourseEnrollment,
CourseEnrollmentAllowed,
ManualEnrollmentAudit,
NonExistentCourseError
)
from student.roles import CourseBetaTesterRole, CourseFinanceAdminRole, CourseInstructorRole, CourseSalesAdminRole
from student.tests.factories import AdminFactory, CourseModeFactory, UserFactory
from xmodule.fields import Date
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase, SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from .test_tools import msk_from_problem_urlname
DATE_FIELD = Date()
EXPECTED_CSV_HEADER = (
'"code","redeem_code_url","course_id","company_name","created_by","redeemed_by","invoice_id","purchaser",'
'"customer_reference_number","internal_reference"'
)
EXPECTED_COUPON_CSV_HEADER = '"Coupon Code","Course Id","% Discount","Description","Expiration Date",' \
'"Is Active","Code Redeemed Count","Total Discounted Seats","Total Discounted Amount"'
# ddt data for test cases involving reports
REPORTS_DATA = (
{
'report_type': 'grade',
'instructor_api_endpoint': 'calculate_grades_csv',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_grades_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrolled learner profile',
'instructor_api_endpoint': 'get_students_features',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_students_features_csv',
'extra_instructor_api_kwargs': {'csv': '/csv'}
},
{
'report_type': 'detailed enrollment',
'instructor_api_endpoint': 'get_enrollment_report',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_detailed_enrollment_features_csv',
'extra_instructor_api_kwargs': {}
},
{
'report_type': 'enrollment',
'instructor_api_endpoint': 'get_students_who_may_enroll',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'proctored exam results',
'instructor_api_endpoint': 'get_proctored_exam_results',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report',
'extra_instructor_api_kwargs': {},
},
{
'report_type': 'problem responses',
'instructor_api_endpoint': 'get_problem_responses',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv',
'extra_instructor_api_kwargs': {},
}
)
# ddt data for test cases involving executive summary report
EXECUTIVE_SUMMARY_DATA = (
{
'report_type': 'executive summary',
'task_type': 'exec_summary_report',
'instructor_api_endpoint': 'get_exec_summary_report',
'task_api_endpoint': 'lms.djangoapps.instructor_task.api.submit_executive_summary_report',
'extra_instructor_api_kwargs': {}
},
)
INSTRUCTOR_GET_ENDPOINTS = set([
'get_anon_ids',
'get_coupon_codes',
'get_issued_certificates',
'get_sale_order_records',
'get_sale_records',
])
INSTRUCTOR_POST_ENDPOINTS = set([
'active_registration_codes',
'add_users_to_cohorts',
'bulk_beta_modify_access',
'calculate_grades_csv',
'change_due_date',
'export_ora2_data',
'generate_registration_codes',
'get_enrollment_report',
'get_exec_summary_report',
'get_grading_config',
'get_problem_responses',
'get_proctored_exam_results',
'get_registration_codes',
'get_student_progress_url',
'get_students_features',
'get_students_who_may_enroll',
'get_user_invoice_preference',
'list_background_email_tasks',
'list_course_role_members',
'list_email_content',
'list_entrance_exam_instructor_tasks',
'list_financial_report_downloads',
'list_forum_members',
'list_instructor_tasks',
'list_report_downloads',
'mark_student_can_skip_entrance_exam',
'modify_access',
'register_and_enroll_students',
'rescore_entrance_exam',
'rescore_problem',
'reset_due_date',
'reset_student_attempts',
'reset_student_attempts_for_entrance_exam',
'sale_validation',
'show_student_extensions',
'show_unit_extensions',
'send_email',
'spent_registration_codes',
'students_update_enrollment',
'update_forum_role_membership',
])
def reverse(endpoint, args=None, kwargs=None, is_dashboard_endpoint=True):
"""
Simple wrapper of Django's reverse that first ensures that we have declared
each endpoint under test.
Arguments:
args: The args to be passed through to reverse.
endpoint: The endpoint to be passed through to reverse.
kwargs: The kwargs to be passed through to reverse.
is_dashboard_endpoint: True if this is an instructor dashboard endpoint
that must be declared in the INSTRUCTOR_GET_ENDPOINTS or
        INSTRUCTOR_POST_ENDPOINTS sets, or False otherwise.
Returns:
The return of Django's reverse function
"""
is_endpoint_declared = endpoint in INSTRUCTOR_GET_ENDPOINTS or endpoint in INSTRUCTOR_POST_ENDPOINTS
if is_dashboard_endpoint and is_endpoint_declared is False:
# Verify that all endpoints are declared so we can ensure they are
# properly validated elsewhere.
raise ValueError("The endpoint {} must be declared in ENDPOINTS before use.".format(endpoint))
return django_reverse(endpoint, args=args, kwargs=kwargs)
@common_exceptions_400
def view_success(request): # pylint: disable=unused-argument
"A dummy view for testing that returns a simple HTTP response"
return HttpResponse('success')
@common_exceptions_400
def view_user_doesnotexist(request): # pylint: disable=unused-argument
"A dummy view that raises a User.DoesNotExist exception"
raise User.DoesNotExist()
@common_exceptions_400
def view_alreadyrunningerror(request): # pylint: disable=unused-argument
"A dummy view that raises an AlreadyRunningError exception"
raise AlreadyRunningError()
@common_exceptions_400
def view_queue_connection_error(request): # pylint: disable=unused-argument
"""
A dummy view that raises a QueueConnectionError exception.
"""
raise QueueConnectionError()
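# Each dummy view above raises one of the exception types that the
# common_exceptions_400 decorator is expected to convert into an HTTP 400
# response; TestCommonExceptions400 below checks the status code and message.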
@attr(shard=1)
@ddt.ddt
class TestCommonExceptions400(TestCase):
"""
Testing the common_exceptions_400 decorator.
"""
def setUp(self):
super(TestCommonExceptions400, self).setUp()
self.request = Mock(spec=HttpRequest)
self.request.META = {}
def test_happy_path(self):
resp = view_success(self.request)
self.assertEqual(resp.status_code, 200)
def test_user_doesnotexist(self):
self.request.is_ajax.return_value = False
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_user_doesnotexist_ajax(self):
self.request.is_ajax.return_value = True
resp = view_user_doesnotexist(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("User does not exist", resp.content)
def test_alreadyrunningerror(self):
self.request.is_ajax.return_value = False
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("Requested task is already running", resp.content)
def test_alreadyrunningerror_ajax(self):
self.request.is_ajax.return_value = True
resp = view_alreadyrunningerror(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn("Requested task is already running", resp.content)
@ddt.data(True, False)
def test_queue_connection_error(self, is_ajax):
"""
Tests that QueueConnectionError exception is handled in common_exception_400.
"""
self.request.is_ajax.return_value = is_ajax
resp = view_queue_connection_error(self.request) # pylint: disable=assignment-from-no-return
self.assertEqual(resp.status_code, 400)
self.assertIn('Error occured. Please try again later', resp.content)
@attr(shard=1)
@ddt.ddt
class TestEndpointHttpMethods(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users can make GET requests against endpoints that allow GET,
and not against those that don't allow GET.
"""
@classmethod
def setUpClass(cls):
"""
Set up test course.
"""
super(TestEndpointHttpMethods, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
"""
Set up global staff role so authorization will not fail.
"""
super(TestEndpointHttpMethods, self).setUp()
global_user = GlobalStaffFactory()
self.client.login(username=global_user.username, password='test')
@ddt.data(*INSTRUCTOR_POST_ENDPOINTS)
def test_endpoints_reject_get(self, data):
"""
Tests that POST endpoints are rejected with 405 when using GET.
"""
url = reverse(data, kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertEqual(
response.status_code, 405,
"Endpoint {} returned status code {} instead of a 405. It should not allow GET.".format(
data, response.status_code
)
)
@ddt.data(*INSTRUCTOR_GET_ENDPOINTS)
def test_endpoints_accept_get(self, data):
"""
Tests that GET endpoints are not rejected with 405 when using GET.
"""
url = reverse(data, kwargs={'course_id': unicode(self.course.id)})
response = self.client.get(url)
self.assertNotEqual(
response.status_code, 405,
"Endpoint {} returned status code 405 where it shouldn't, since it should allow GET.".format(
data
)
)
@attr(shard=1)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class TestInstructorAPIDenyLevels(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Ensure that users cannot access endpoints they shouldn't be able to.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIDenyLevels, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
@classmethod
def tearDownClass(cls):
super(TestInstructorAPIDenyLevels, cls).tearDownClass()
BulkEmailFlag.objects.all().delete()
def setUp(self):
super(TestInstructorAPIDenyLevels, self).setUp()
self.user = UserFactory.create()
CourseEnrollment.enroll(self.user, self.course.id)
_module = StudentModule.objects.create(
student=self.user,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
# Endpoints that only Staff or Instructors can access
self.staff_level_endpoints = [
('students_update_enrollment',
{'identifiers': 'foo@example.org', 'action': 'enroll'}),
('get_grading_config', {}),
('get_students_features', {}),
('get_student_progress_url', {'unique_student_identifier': self.user.username}),
('reset_student_attempts',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
('update_forum_role_membership',
{'unique_student_identifier': self.user.email, 'rolename': 'Moderator', 'action': 'allow'}),
('list_forum_members', {'rolename': FORUM_ROLE_COMMUNITY_TA}),
('send_email', {'send_to': '["staff"]', 'subject': 'test', 'message': 'asdf'}),
('list_instructor_tasks', {}),
('list_background_email_tasks', {}),
('list_report_downloads', {}),
('list_financial_report_downloads', {}),
('calculate_grades_csv', {}),
('get_students_features', {}),
('get_enrollment_report', {}),
('get_students_who_may_enroll', {}),
('get_exec_summary_report', {}),
('get_proctored_exam_results', {}),
('get_problem_responses', {}),
('export_ora2_data', {}),
]
# Endpoints that only Instructors can access
self.instructor_level_endpoints = [
('bulk_beta_modify_access', {'identifiers': 'foo@example.org', 'action': 'add'}),
('modify_access', {'unique_student_identifier': self.user.email, 'rolename': 'beta', 'action': 'allow'}),
('list_course_role_members', {'rolename': 'beta'}),
('rescore_problem',
{'problem_to_reset': self.problem_urlname, 'unique_student_identifier': self.user.email}),
]
def _access_endpoint(self, endpoint, args, status_code, msg):
"""
Asserts that accessing the given `endpoint` gets a response of `status_code`.
endpoint: string, endpoint for instructor dash API
args: dict, kwargs for `reverse` call
status_code: expected HTTP status code response
msg: message to display if assertion fails.
"""
url = reverse(endpoint, kwargs={'course_id': self.course.id.to_deprecated_string()})
if endpoint in INSTRUCTOR_GET_ENDPOINTS:
response = self.client.get(url, args)
else:
response = self.client.post(url, args)
self.assertEqual(
response.status_code,
status_code,
msg=msg
)
def test_student_level(self):
"""
Ensure that an enrolled student can't access staff or instructor endpoints.
"""
self.client.login(username=self.user.username, password='test')
for endpoint, args in self.staff_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Student should not be allowed to access endpoint " + endpoint
)
def _access_problem_responses_endpoint(self, msg):
"""
Access endpoint for problem responses report, ensuring that
UsageKey.from_string returns a problem key that the endpoint
can work with.
msg: message to display if assertion fails.
"""
mock_problem_key = Mock(return_value=u'')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
self._access_endpoint('get_problem_responses', {}, 200, msg)
def test_staff_level(self):
"""
Ensure that a staff member can't access instructor endpoints.
"""
staff_member = StaffFactory(course_key=self.course.id)
CourseEnrollment.enroll(staff_member, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(staff_member)
self.client.login(username=staff_member.username, password='test')
# Try to promote to forums admin - not working
# update_forum_role(self.course.id, staff_member, FORUM_ROLE_ADMINISTRATOR, 'allow')
for endpoint, args in self.staff_level_endpoints:
expected_status = 200
# TODO: make these work
if endpoint in ['update_forum_role_membership', 'list_forum_members']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Staff member should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
expected_status,
"Staff member should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
self._access_endpoint(
endpoint,
args,
403,
"Staff member should not be allowed to access endpoint " + endpoint
)
def test_instructor_level(self):
"""
Ensure that an instructor member can access all endpoints.
"""
inst = InstructorFactory(course_key=self.course.id)
CourseEnrollment.enroll(inst, self.course.id)
CourseFinanceAdminRole(self.course.id).add_users(inst)
self.client.login(username=inst.username, password='test')
for endpoint, args in self.staff_level_endpoints:
expected_status = 200
# TODO: make these work
if endpoint in ['update_forum_role_membership']:
continue
elif endpoint == 'get_problem_responses':
self._access_problem_responses_endpoint(
"Instructor should be allowed to access endpoint " + endpoint
)
continue
self._access_endpoint(
endpoint,
args,
expected_status,
"Instructor should be allowed to access endpoint " + endpoint
)
for endpoint, args in self.instructor_level_endpoints:
expected_status = 200
# TODO: make this work
if endpoint in ['rescore_problem']:
continue
self._access_endpoint(
endpoint,
args,
expected_status,
"Instructor should be allowed to access endpoint " + endpoint
)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
class TestInstructorAPIBulkAccountCreationAndEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test Bulk account creation and enrollment from csv file
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Create a course with mode 'audit'
cls.audit_course = CourseFactory.create()
CourseModeFactory.create(course_id=cls.audit_course.id, mode_slug=CourseMode.AUDIT)
cls.url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(cls.course.id)}
)
cls.audit_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(cls.audit_course.id)}
)
def setUp(self):
super(TestInstructorAPIBulkAccountCreationAndEnrollment, self).setUp()
# Create a course with mode 'honor' and with price
self.white_label_course = CourseFactory.create()
self.white_label_course_mode = CourseModeFactory.create(
course_id=self.white_label_course.id,
mode_slug=CourseMode.HONOR,
min_price=10,
suggested_prices='10',
)
self.white_label_course_url = reverse(
'register_and_enroll_students', kwargs={'course_id': unicode(self.white_label_course.id)}
)
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.audit_course_instructor = InstructorFactory(course_key=self.audit_course.id)
self.white_label_course_instructor = InstructorFactory(course_key=self.white_label_course.id)
self.client.login(username=self.instructor.username, password='test')
self.not_enrolled_student = UserFactory(
username='NotEnrolledStudent',
email='nonenrolled@test.com',
first_name='NotEnrolled',
last_name='Student'
)
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv(self, info_log):
"""
Happy path test to create a single new user
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for the email that is sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_account_creation_and_enrollment_with_csv_with_blank_lines(self, info_log):
"""
Happy path test to create a single new user from a CSV containing blank lines
"""
csv_content = "\ntest_student@example.com,test_student_1,tester1,USA\n\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log for the email that is sent to the newly created user.
info_log.assert_called_with('email sent to new created user at %s', 'test_student@example.com')
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_email_and_username_already_exist(self, info_log):
"""
If the email address and username already exist
and the user is already enrolled in the course, do nothing (and send no email)
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# test the log message for the already-existing user (no email is sent).
info_log.assert_called_with(
u"user already exists with username '%s' and email '%s'",
'test_student_1',
'test_student@example.com'
)
def test_file_upload_type_not_csv(self):
"""
Try uploading some non-CSV file and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.jpg", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Make sure that the file you upload is in CSV format with no extraneous characters or rows.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_bad_file_upload_type(self):
"""
Try uploading a CSV file with unreadable binary content and verify that it is rejected
"""
uploaded_file = SimpleUploadedFile("temp.csv", io.BytesIO(b"some initial binary data: \x00\x01").read())
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'Could not read uploaded file.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_insufficient_data(self):
"""
Try uploading a CSV file which does not have exactly four columns of data
"""
csv_content = "test_student@example.com,test_student_1\n"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 1)
self.assertEquals(data['general_errors'][0]['response'], 'Data in row #1 must have exactly four columns: email, username, full name, and country')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_invalid_email_in_csv(self):
"""
Test failure case of a poorly formatted email field
"""
csv_content = "test_student.example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
data = json.loads(response.content)
self.assertEqual(response.status_code, 200)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Invalid email {0}.'.format('test_student.example.com'))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch('lms.djangoapps.instructor.views.api.log.info')
def test_csv_user_exist_and_not_enrolled(self, info_log):
"""
If the email address and username already exist
and the user is not enrolled in the course, enroll the user and move on to the next row.
"""
csv_content = "nonenrolled@test.com,NotEnrolledStudent,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
info_log.assert_called_with(
u'user %s enrolled in the course %s',
u'NotEnrolledStudent',
self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_email_in_csv(self):
"""
If the email address already exists, but the username is different,
assume it is the correct user and just register the user in the course.
"""
csv_content = "test_student@example.com,test_student_1,tester1,USA\n" \
"test_student@example.com,test_student_2,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
warning_message = 'An account with email {email} exists but the provided username {username} ' \
'is different. Enrolling anyway with {email}.'.format(email='test_student@example.com', username='test_student_2')
self.assertNotEquals(len(data['warnings']), 0)
self.assertEquals(data['warnings'][0]['response'], warning_message)
user = User.objects.get(email='test_student@example.com')
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
def test_user_with_already_existing_username_in_csv(self):
"""
If the username already exists (but not the email),
assume it is a different user and fail to create the new account.
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
def test_csv_file_not_attached(self):
"""
Test when the user does not attach a file
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'file_not_found': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['general_errors']), 0)
self.assertEquals(data['general_errors'][0]['response'], 'File is not attached.')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_raising_exception_in_auto_registration_and_enrollment_case(self):
"""
Test that exceptions raised during account creation and enrollment are reported as row errors
"""
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student2@example.com,test_student_1,tester2,US"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
with patch('lms.djangoapps.instructor.views.api.create_manual_course_enrollment') as mock:
mock.side_effect = NonExistentCourseError()
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'NonExistentCourseError')
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
def test_generate_unique_password(self):
"""
generate_unique_password should generate a unique password string that excludes certain characters.
"""
password = generate_unique_password([], 12)
self.assertEquals(len(password), 12)
for letter in password:
self.assertNotIn(letter, 'aAeEiIoOuU1l')
def test_users_created_and_enrolled_successfully_if_others_fail(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA\n" \
"test_student3@example.com,test_student_1,tester3,CA\n" \
"test_student2@example.com,test_student_2,tester2,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertNotEquals(len(data['row_errors']), 0)
self.assertEquals(data['row_errors'][0]['response'], 'Username {user} already exists.'.format(user='test_student_1'))
self.assertTrue(User.objects.filter(username='test_student_1', email='test_student1@example.com').exists())
self.assertTrue(User.objects.filter(username='test_student_2', email='test_student2@example.com').exists())
self.assertFalse(User.objects.filter(email='test_student3@example.com').exists())
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
@patch.object(lms.djangoapps.instructor.views.api, 'generate_random_string',
Mock(side_effect=['first', 'first', 'second']))
def test_generate_unique_password_no_reuse(self):
"""
generate_unique_password should generate a unique password string that hasn't been generated before.
"""
generated_password = ['first']
password = generate_unique_password(generated_password, 12)
self.assertNotEquals(password, 'first')
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': False})
def test_allow_automated_signups_flag_not_set(self):
csv_content = "test_student1@example.com,test_student_1,tester1,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.url, {'students_list': uploaded_file})
self.assertEquals(response.status_code, 403)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_audit_enrollment_mode(self):
"""
Test that the enrollment mode for audit courses is 'audit'.
"""
# Login Audit Course instructor
self.client.login(username=self.audit_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.audit_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# Verify enrollment modes to be 'audit'
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.AUDIT)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_honor_enrollment_mode(self):
"""
Test that enrollment mode for unpaid honor courses is 'honor'.
"""
# Remove white label course price
self.white_label_course_mode.min_price = 0
self.white_label_course_mode.suggested_prices = ''
self.white_label_course_mode.save() # pylint: disable=no-member
# Login white label course instructor
self.client.login(username=self.white_label_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# Verify enrollment modes to be 'honor'
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.HONOR)
@patch.dict(settings.FEATURES, {'ALLOW_AUTOMATED_SIGNUPS': True})
def test_default_shopping_cart_enrollment_mode_for_white_label(self):
"""
Test that enrollment mode for white label courses (paid courses) is DEFAULT_SHOPPINGCART_MODE_SLUG.
"""
# Login white label course instructor
self.client.login(username=self.white_label_course_instructor.username, password='test')
csv_content = "test_student_wl@example.com,test_student_wl,Test Student,USA"
uploaded_file = SimpleUploadedFile("temp.csv", csv_content)
response = self.client.post(self.white_label_course_url, {'students_list': uploaded_file})
self.assertEqual(response.status_code, 200)
data = json.loads(response.content)
self.assertEquals(len(data['row_errors']), 0)
self.assertEquals(len(data['warnings']), 0)
self.assertEquals(len(data['general_errors']), 0)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
# Verify enrollment modes to be CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG
for enrollment in manual_enrollments:
self.assertEqual(enrollment.enrollment.mode, CourseMode.DEFAULT_SHOPPINGCART_MODE_SLUG)
@attr(shard=1)
@ddt.ddt
class TestInstructorAPIEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test enrollment modification endpoint.
This test does NOT exhaustively test state changes, that is the
job of test_enrollment. This tests the response and action switch.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = configuration_helpers.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIEnrollment, self).setUp()
self.request = RequestFactory().request()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.enrolled_student = UserFactory(username='EnrolledStudent', first_name='Enrolled', last_name='Student')
CourseEnrollment.enroll(
self.enrolled_student,
self.course.id
)
self.notenrolled_student = UserFactory(username='NotEnrolledStudent', first_name='NotEnrolled',
last_name='Student')
# Create invited, but not registered, user
cea = CourseEnrollmentAllowed(email='robot-allowed@robot.org', course_id=self.course.id)
cea.save()
self.allowed_email = 'robot-allowed@robot.org'
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
# uncomment to enable printing of large diffs
# from failed assertions in the event of a test failure.
# (comment because pylint C0103(invalid-name))
# self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': action})
self.assertEqual(response.status_code, 400)
def test_invalid_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': 'percivaloctavius@', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius@',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_invalid_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': 'percivaloctavius', 'action': 'enroll', 'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": 'percivaloctavius',
"invalidIdentifier": True,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_with_username(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'enroll',
'email_students': False})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
'auto_enroll': False,
"results": [
{
"identifier": self.notenrolled_student.username,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_enroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'enroll',
'email_students': False})
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
@ddt.data('http', 'https')
def test_enroll_with_email(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notenrolled_student.email): {}".format(type(self.notenrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now enrolled
user = User.objects.get(email=self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notenrolled_student.email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been enrolled in {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear NotEnrolled Student\n\nYou have been enrolled in {} "
"at edx.org by a member of the course staff. "
"The course should now appear on your edx.org dashboard.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to NotEnrolled Student".format(
self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the "
"registration form making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account, "
"visit {proto}://{site}{about_path} to join the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name, proto=protocol, site=self.site_name, about_path=self.about_path
)
)
@ddt.data('http', 'https')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_mktgsite(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"You can then enroll in {display_name}.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name, proto=protocol, site=self.site_name
)
)
@ddt.data('http', 'https')
def test_enroll_with_email_not_registered_autoenroll(self, protocol):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been invited to register for {}'.format(self.course.display_name)
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To finish your registration, please visit {proto}://{site}/register and fill out the registration form "
"making sure to use robot-not-an-email-yet@robot.org in the E-mail field.\n"
"Once you have registered and activated your account,"
" you will see {display_name} listed on your dashboard.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, display_name=self.course.display_name
)
)
def test_unenroll_without_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': False})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_unenroll_with_email(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.enrolled_student.email, 'action': 'unenroll',
'email_students': True})
print "type(self.enrolled_student.email): {}".format(type(self.enrolled_student.email))
self.assertEqual(response.status_code, 200)
# test that the user is now unenrolled
user = User.objects.get(email=self.enrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(user, self.course.id))
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.enrolled_student.email,
"before": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Enrolled Student\n\nYou have been un-enrolled in {display_name} "
"at edx.org by a member of the course staff. "
"The course will no longer appear on your edx.org dashboard.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to Enrolled Student".format(
display_name=self.course.display_name,
)
)
def test_unenroll_with_email_allowed_student(self):
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.allowed_email, 'action': 'unenroll', 'email_students': True})
print "type(self.allowed_email): {}".format(type(self.allowed_email))
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.allowed_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": True,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ALLOWEDTOENROLL_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been un-enrolled from {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear Student,\n\nYou have been un-enrolled from course {display_name} by a member of the course staff. "
"Please disregard the invitation previously sent.\n\n----\n"
"This email was automatically sent from edx.org to robot-allowed@robot.org".format(
display_name=self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name} at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{about_path} and register for the course.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
proto=protocol, site=self.site_name, about_path=self.about_path,
display_name=self.course.display_name,
)
)
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_enroll_email_not_registered_shib_mktgsite(self, mock_uses_shib):
# Try with marketing site enabled and shib on
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
# Try with marketing site enabled
with patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}):
response = self.client.post(url, {'identifiers': self.notregistered_email, 'action': 'enroll',
'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {} at edx.org by a member of the course staff.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
self.course.display_name,
)
)
@ddt.data('http', 'https')
@patch('lms.djangoapps.instructor.enrollment.uses_shib')
def test_enroll_with_email_not_registered_with_shib_autoenroll(self, protocol, mock_uses_shib):
mock_uses_shib.return_value = True
url = reverse('students_update_enrollment', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
print "type(self.notregistered_email): {}".format(type(self.notregistered_email))
self.assertEqual(response.status_code, 200)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to register for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear student,\n\nYou have been invited to join {display_name}"
" at edx.org by a member of the course staff.\n\n"
"To access the course visit {proto}://{site}{course_path} and login.\n\n----\n"
"This email was automatically sent from edx.org to robot-not-an-email-yet@robot.org".format(
display_name=self.course.display_name,
proto=protocol, site=self.site_name, course_path=self.course_path
)
)
def test_enroll_already_enrolled_student(self):
"""
Ensure that already enrolled "verified" students cannot be downgraded
to "honor"
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# make this enrollment "verified"
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
# now re-enroll the student through the instructor dash
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
# affirm that the student is still in "verified" mode
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, ENROLLED_TO_ENROLLED)
self.assertEqual(course_enrollment.mode, u"verified")
def create_paid_course(self):
"""
        Create a course with a paid course mode and return it.
"""
paid_course = CourseFactory.create()
CourseModeFactory.create(course_id=paid_course.id, min_price=50, mode_slug=CourseMode.HONOR)
CourseInstructorRole(paid_course.id).add_users(self.instructor)
return paid_course
def test_reason_field_should_not_be_empty(self):
"""
        Test that the reason field must not be empty when
        manually enrolling students in paid courses.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 0)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"error": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_allowed_to_enroll_user(self):
"""
        Test enrolling an unenrolled user who is first marked as allowed to enroll.
"""
paid_course = self.create_paid_course()
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing..'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_ALLOWEDTOENROLL)
self.assertEqual(response.status_code, 200)
        # now register the user
UserFactory(email=self.notregistered_email)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'enroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 2)
self.assertEqual(manual_enrollments[1].state_transition, ALLOWEDTOENROLL_TO_ENROLLED)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "enroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": True,
"allowed": True,
},
"after": {
"enrollment": True,
"auto_enroll": False,
"user": True,
"allowed": True,
}
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenrolled_already_not_enrolled_user(self):
"""
        Test unenrolling a user who is not enrolled in the course.
"""
paid_course = self.create_paid_course()
course_enrollment = CourseEnrollment.objects.filter(
user__email=self.notregistered_email, course_id=paid_course.id
)
self.assertEqual(course_enrollment.count(), 0)
url = reverse('students_update_enrollment', kwargs={'course_id': paid_course.id.to_deprecated_string()})
params = {'identifiers': self.notregistered_email, 'action': 'unenroll', 'email_students': False,
'auto_enroll': False, 'reason': 'testing'}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "unenroll",
"auto_enroll": False,
"results": [
{
"identifier": self.notregistered_email,
"before": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
},
"after": {
"enrollment": False,
"auto_enroll": False,
"user": False,
"allowed": False,
}
}
]
}
manual_enrollments = ManualEnrollmentAudit.objects.all()
self.assertEqual(manual_enrollments.count(), 1)
self.assertEqual(manual_enrollments[0].state_transition, UNENROLLED_TO_UNENROLLED)
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_unenroll_and_enroll_verified(self):
"""
Test that unenrolling and enrolling a student from a verified track
results in that student being in the default track
"""
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
# upgrade enrollment
course_enrollment.mode = u'verified'
course_enrollment.save()
self.assertEqual(course_enrollment.mode, u'verified')
self._change_student_enrollment(self.enrolled_student, self.course, 'unenroll')
self._change_student_enrollment(self.enrolled_student, self.course, 'enroll')
course_enrollment = CourseEnrollment.objects.get(
user=self.enrolled_student, course_id=self.course.id
)
self.assertEqual(course_enrollment.mode, CourseMode.DEFAULT_MODE_SLUG)
def _change_student_enrollment(self, user, course, action):
"""
Helper function that posts to 'students_update_enrollment' to change
a student's enrollment
"""
url = reverse(
'students_update_enrollment',
kwargs={'course_id': course.id.to_deprecated_string()},
)
params = {
'identifiers': user.email,
'action': action,
'email_students': True,
'reason': 'change user enrollment'
}
response = self.client.post(url, params)
self.assertEqual(response.status_code, 200)
return response
@attr(shard=1)
@ddt.ddt
class TestInstructorAPIBulkBetaEnrollment(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test bulk beta modify access endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIBulkBetaEnrollment, cls).setUpClass()
cls.course = CourseFactory.create()
# Email URL values
cls.site_name = configuration_helpers.get_value(
'SITE_NAME',
settings.SITE_NAME
)
cls.about_path = '/courses/{}/about'.format(cls.course.id)
cls.course_path = '/courses/{}/'.format(cls.course.id)
def setUp(self):
super(TestInstructorAPIBulkBetaEnrollment, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.beta_tester = BetaTesterFactory(course_key=self.course.id)
CourseEnrollment.enroll(
self.beta_tester,
self.course.id
)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
self.notenrolled_student = UserFactory(username='NotEnrolledStudent')
self.notregistered_email = 'robot-not-an-email-yet@robot.org'
self.assertEqual(User.objects.filter(email=self.notregistered_email).count(), 0)
self.request = RequestFactory().request()
        # uncomment to enable printing of large diffs
        # from failed assertions in the event of a test failure.
        # (commented out because of pylint C0103(invalid-name))
        # self.maxDiff = None
def test_missing_params(self):
""" Test missing all query parameters. """
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_bad_action(self):
""" Test with an invalid action. """
action = 'robot-not-an-action'
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.beta_tester.email, 'action': action})
self.assertEqual(response.status_code, 400)
def add_notenrolled(self, response, identifier):
"""
Test Helper Method (not a test, called by other tests)
Takes a client response from a call to bulk_beta_modify_access with 'email_students': False,
and the student identifier (email or username) given as 'identifiers' in the request.
        Asserts the response returns cleanly, that the student was added as a beta tester, and the
response properly contains their identifier, 'error': False, and 'userDoesNotExist': False.
Additionally asserts no email was sent.
"""
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": identifier,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_add_notenrolled_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_email_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.email)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertFalse(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
def test_add_notenrolled_username_autoenroll(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.username, 'action': 'add', 'email_students': False, 'auto_enroll': True})
self.add_notenrolled(response, self.notenrolled_student.username)
self.assertTrue(CourseEnrollment.is_enrolled(self.notenrolled_student, self.course.id))
@ddt.data('http', 'https')
def test_add_notenrolled_with_email(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"Visit {proto}://{site}{about_path} to join "
"the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
about_path=self.about_path
)
)
@ddt.data('http', 'https')
def test_add_notenrolled_with_email_autoenroll(self, protocol):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
params = {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True,
'auto_enroll': True}
environ = {'wsgi.url_scheme': protocol}
response = self.client.post(url, params, **environ)
self.assertEqual(response.status_code, 200)
self.assertTrue(CourseBetaTesterRole(self.course.id).has_user(self.notenrolled_student))
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notenrolled_student.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
'You have been invited to a beta test for {display_name}'.format(display_name=self.course.display_name)
)
self.assertEqual(
mail.outbox[0].body,
u"Dear {student_name}\n\nYou have been invited to be a beta tester "
"for {display_name} at edx.org by a member of the course staff.\n\n"
"To start accessing course materials, please visit "
"{proto}://{site}{course_path}\n\n----\n"
"This email was automatically sent from edx.org to {student_email}".format(
display_name=self.course.display_name,
student_name=self.notenrolled_student.profile.name,
student_email=self.notenrolled_student.email,
proto=protocol,
site=self.site_name,
course_path=self.course_path
)
)
@patch.dict(settings.FEATURES, {'ENABLE_MKTG_SITE': True})
def test_add_notenrolled_email_mktgsite(self):
# Try with marketing site enabled
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'identifiers': self.notenrolled_student.email, 'action': 'add', 'email_students': True})
self.assertEqual(response.status_code, 200)
self.assertEqual(
mail.outbox[0].body,
u"Dear {}\n\nYou have been invited to be a beta tester "
"for {} at edx.org by a member of the course staff.\n\n"
"Visit edx.org to enroll in the course and begin the beta test.\n\n----\n"
"This email was automatically sent from edx.org to {}".format(
self.notenrolled_student.profile.name,
self.course.display_name,
self.notenrolled_student.email,
)
)
def test_enroll_with_email_not_registered(self):
# User doesn't exist
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.notregistered_email, 'action': 'add', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# test the response data
expected = {
"action": "add",
"results": [
{
"identifier": self.notregistered_email,
"error": True,
"userDoesNotExist": True,
"is_active": None
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_without_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': False,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 0)
def test_remove_with_email(self):
url = reverse('bulk_beta_modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url,
{'identifiers': self.beta_tester.email, 'action': 'remove', 'email_students': True,
'reason': 'testing'})
self.assertEqual(response.status_code, 200)
# Works around a caching bug which supposedly can't happen in prod. The instance here is not ==
# the instance fetched from the email above which had its cache cleared
if hasattr(self.beta_tester, '_roles'):
del self.beta_tester._roles
self.assertFalse(CourseBetaTesterRole(self.course.id).has_user(self.beta_tester))
# test the response data
expected = {
"action": "remove",
"results": [
{
"identifier": self.beta_tester.email,
"error": False,
"userDoesNotExist": False,
"is_active": True
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
# Check the outbox
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(
mail.outbox[0].subject,
u'You have been removed from a beta test for {display_name}'.format(display_name=self.course.display_name,)
)
self.assertEqual(
mail.outbox[0].body,
"Dear {full_name}\n\nYou have been removed as a beta tester for "
"{display_name} at edx.org by a member of the course staff. "
"The course will remain on your dashboard, but you will no longer "
"be part of the beta testing group.\n\n"
"Your other courses have not been affected.\n\n----\n"
"This email was automatically sent from edx.org to {email_address}".format(
display_name=self.course.display_name,
full_name=self.beta_tester.profile.name,
email_address=self.beta_tester.email
)
)
@attr(shard=1)
class TestInstructorAPILevelsAccess(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change permissions
of other users.
    This test does NOT test whether the actions had an effect on the
    database; that is the job of test_access.
This tests the response and action switch.
Actually, modify_access does not have a very meaningful
response yet, so only the status code is tested.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsAccess, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsAccess, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.other_instructor = InstructorFactory(course_key=self.course.id)
self.other_staff = StaffFactory(course_key=self.course.id)
self.other_user = UserFactory()
def test_modify_access_noparams(self):
""" Test missing all query parameters. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_action(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'robot-not-an-action',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_bad_role(self):
""" Test with an invalid action parameter. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'robot-not-a-roll',
'action': 'revoke',
})
self.assertEqual(response.status_code, 400)
def test_modify_access_allow(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_user.email,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_allow_with_uname(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_instructor.username,
'rolename': 'staff',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_with_username(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.username,
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_with_fake_user(self):
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': 'GandalfTheGrey',
'rolename': 'staff',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': 'GandalfTheGrey',
'userDoesNotExist': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_with_inactive_user(self):
self.other_user.is_active = False
self.other_user.save() # pylint: disable=no-member
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_user.username,
'rolename': 'beta',
'action': 'allow',
})
self.assertEqual(response.status_code, 200)
expected = {
'unique_student_identifier': self.other_user.username,
'inactiveUser': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_modify_access_revoke_not_allowed(self):
""" Test revoking access that a user does not have. """
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.other_staff.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
def test_modify_access_revoke_self(self):
"""
        Test that an instructor cannot remove instructor privileges from themselves.
"""
url = reverse('modify_access', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'unique_student_identifier': self.instructor.email,
'rolename': 'instructor',
'action': 'revoke',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'unique_student_identifier': self.instructor.username,
'rolename': 'instructor',
'action': 'revoke',
'removingSelfAsInstructor': True,
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_noparams(self):
""" Test missing all query parameters. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_bad_rolename(self):
""" Test with an invalid rolename parameter. """
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'rolename': 'robot-not-a-rolename',
})
self.assertEqual(response.status_code, 400)
def test_list_course_role_members_staff(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'rolename': 'staff',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'staff': [
{
'username': self.other_staff.username,
'email': self.other_staff.email,
'first_name': self.other_staff.first_name,
'last_name': self.other_staff.last_name,
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_list_course_role_members_beta(self):
url = reverse('list_course_role_members', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'rolename': 'beta',
})
self.assertEqual(response.status_code, 200)
# check response content
expected = {
'course_id': self.course.id.to_deprecated_string(),
'beta': []
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected)
def test_update_forum_role_membership(self):
"""
Test update forum role membership with user's email and username.
"""
# Seed forum roles for course.
seed_permissions_roles(self.course.id)
for user in [self.instructor, self.other_user]:
for identifier_attr in [user.email, user.username]:
for rolename in ["Administrator", "Moderator", "Community TA"]:
for action in ["allow", "revoke"]:
self.assert_update_forum_role_membership(user, identifier_attr, rolename, action)
def assert_update_forum_role_membership(self, current_user, identifier, rolename, action):
"""
Test update forum role membership.
Get unique_student_identifier, rolename and action and update forum role.
"""
url = reverse('update_forum_role_membership', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(
url,
{
'unique_student_identifier': identifier,
'rolename': rolename,
'action': action,
}
)
# Status code should be 200.
self.assertEqual(response.status_code, 200)
user_roles = current_user.roles.filter(course_id=self.course.id).values_list("name", flat=True)
if action == 'allow':
self.assertIn(rolename, user_roles)
elif action == 'revoke':
self.assertNotIn(rolename, user_roles)
@attr(shard=1)
@ddt.ddt
@patch.dict('django.conf.settings.FEATURES', {'ENABLE_PAID_COURSE_REGISTRATION': True})
class TestInstructorAPILevelsDataDump(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints that show data without side effects.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPILevelsDataDump, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorAPILevelsDataDump, self).setUp()
self.course_mode = CourseMode(course_id=self.course.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=40)
self.course_mode.save()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.cart = Order.get_cart_for_user(self.instructor)
self.coupon_code = 'abcde'
self.coupon = Coupon(code=self.coupon_code, description='testing code', course_id=self.course.id,
percentage_discount=10, created_by=self.instructor, is_active=True)
self.coupon.save()
# Create testing invoice 1
self.sale_invoice_1 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw', recipient_email='test1@test.com', customer_reference_number='2Fwe23S',
internal_reference="A", course_id=self.course.id, is_valid=True
)
self.invoice_item = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=self.sale_invoice_1,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
self.students = [UserFactory() for _ in xrange(6)]
for student in self.students:
CourseEnrollment.enroll(student, self.course.id)
self.students_who_may_enroll = self.students + [UserFactory() for _ in range(5)]
for student in self.students_who_may_enroll:
CourseEnrollmentAllowed.objects.create(
email=student.email, course_id=self.course.id
)
def register_with_redemption_code(self, user, code):
"""
        Enroll a user using a registration code.
"""
redeem_url = reverse('shoppingcart.views.register_code_redemption', args=[code], is_dashboard_endpoint=False)
self.client.login(username=user.username, password='test')
response = self.client.get(redeem_url)
        self.assertEqual(response.status_code, 200)
# check button text
self.assertIn('Activate Course Enrollment', response.content)
response = self.client.post(redeem_url)
        self.assertEqual(response.status_code, 200)
def test_invalidate_sale_record(self):
"""
        Test the sale invalidation scenario.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
data = {'invoice_number': self.sale_invoice_1.id, 'event_type': "invalidate"}
url = reverse('sale_validation', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.assert_request_status_code(200, url, method="POST", data=data)
        # Now try to fetch data against a non-existent invoice number
test_data_1 = {'invoice_number': 100, 'event_type': "invalidate"}
self.assert_request_status_code(404, url, method="POST", data=test_data_1)
        # Now invalidate the same invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("The sale associated with this invoice has already been invalidated.", response.content)
# now re_validate the invoice number
data['event_type'] = "re_validate"
self.assert_request_status_code(200, url, method="POST", data=data)
        # Now re_validate the same active invoice number and expect a Bad Request
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("This invoice is already active.", response.content)
test_data_2 = {'invoice_number': self.sale_invoice_1.id}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_2)
self.assertIn("Missing required event_type parameter", response.content)
test_data_3 = {'event_type': "re_validate"}
response = self.assert_request_status_code(400, url, method="POST", data=test_data_3)
self.assertIn("Missing required invoice_number parameter", response.content)
# submitting invalid invoice number
data['invoice_number'] = 'testing'
response = self.assert_request_status_code(400, url, method="POST", data=data)
self.assertIn("invoice_number must be an integer, {value} provided".format(value=data['invoice_number']), response.content)
def test_get_sale_order_records_features_csv(self):
"""
Test that the response from get_sale_order_records is in csv format.
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
self.cart.order_type = 'business'
self.cart.save()
self.cart.add_billing_details(company_name='Test Company', company_contact_name='Test',
company_contact_email='test@123', recipient_name='R1',
recipient_email='', customer_reference_number='PO#23')
paid_course_reg_item = PaidCourseRegistration.add_to_order(
self.cart,
self.course.id,
mode_slug=CourseMode.HONOR
)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(
reverse('shoppingcart.views.update_user_cart', is_dashboard_endpoint=False),
{'ItemId': paid_course_reg_item.id, 'qty': '4'}
)
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(
reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
{'code': coupon.code}
)
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
# get the updated item
item = self.cart.orderitem_set.all().select_subclasses()[0]
# get the redeemed coupon information
coupon_redemption = CouponRedemption.objects.select_related('coupon').filter(order=self.cart)
sale_order_url = reverse('get_sale_order_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(sale_order_url)
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertIn('36', response.content.split('\r\n')[1])
self.assertIn(str(item.unit_cost), response.content.split('\r\n')[1],)
self.assertIn(str(item.list_price), response.content.split('\r\n')[1],)
self.assertIn(item.status, response.content.split('\r\n')[1],)
self.assertIn(coupon_redemption[0].coupon.code, response.content.split('\r\n')[1],)
def test_coupon_redeem_count_in_ecommerce_section(self):
"""
Test that checks the redeem count in the instructor_dashboard coupon section
"""
# add the coupon code for the course
coupon = Coupon(
code='test_code', description='test_description', course_id=self.course.id,
percentage_discount='10', created_by=self.instructor, is_active=True
)
coupon.save()
# Coupon Redeem Count only visible for Financial Admins.
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# apply the coupon code to the item in the cart
resp = self.client.post(
reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
{'code': coupon.code}
)
self.assertEqual(resp.status_code, 200)
# URL for instructor dashboard
instructor_dashboard = reverse(
'instructor_dashboard',
kwargs={'course_id': self.course.id.to_deprecated_string()},
is_dashboard_endpoint=False
)
# visit the instructor dashboard page and
# check that the coupon redeem count should be 0
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>0</td>', resp.content)
# now make the payment of your cart items
self.cart.purchase()
# visit the instructor dashboard page and
# check that the coupon redeem count should be 1
resp = self.client.get(instructor_dashboard)
self.assertEqual(resp.status_code, 200)
self.assertIn('Number Redeemed', resp.content)
self.assertIn('<td>1</td>', resp.content)
def test_get_sale_records_features_csv(self):
"""
Test that the response from get_sale_records is in csv format.
"""
for i in range(2):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse(
'get_sale_records',
kwargs={'course_id': self.course.id.to_deprecated_string()}
)
response = self.client.post(url + '/csv', {})
self.assertEqual(response['Content-Type'], 'text/csv')
def test_get_sale_records_features_json(self):
"""
Test that the response from get_sale_records is in json format.
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='sale_invoice{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
for res in res_json['sale']:
self.validate_sale_records_response(
res,
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
def test_get_sale_records_features_with_multiple_invoices(self):
"""
Test that the response from get_sale_records is in json format for multiple invoices
"""
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='qwerty{}'.format(i),
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
course_registration_code.save()
# Create test invoice 2
sale_invoice_2 = Invoice.objects.create(
total_amount=1234.32, company_name='Test1', company_contact_name='TestName', company_contact_email='Test@company.com',
recipient_name='Testw_2', recipient_email='test2@test.com', customer_reference_number='2Fwe23S',
internal_reference="B", course_id=self.course.id
)
invoice_item_2 = CourseRegistrationCodeInvoiceItem.objects.create(
invoice=sale_invoice_2,
qty=1,
unit_price=1234.32,
course_id=self.course.id
)
for i in range(5):
course_registration_code = CourseRegistrationCode(
code='xyzmn{}'.format(i), course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor, invoice=sale_invoice_2, invoice_item=invoice_item_2, mode_slug='honor'
)
course_registration_code.save()
url = reverse('get_sale_records', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('sale', res_json)
self.validate_sale_records_response(
res_json['sale'][0],
course_registration_code,
self.sale_invoice_1,
0,
invoice_item=self.invoice_item
)
self.validate_sale_records_response(
res_json['sale'][1],
course_registration_code,
sale_invoice_2,
0,
invoice_item=invoice_item_2
)
def validate_sale_records_response(self, res, course_registration_code, invoice, used_codes, invoice_item):
"""
        Validate sale record attribute values against the response object.
"""
self.assertEqual(res['total_amount'], invoice.total_amount)
self.assertEqual(res['recipient_email'], invoice.recipient_email)
self.assertEqual(res['recipient_name'], invoice.recipient_name)
self.assertEqual(res['company_name'], invoice.company_name)
self.assertEqual(res['company_contact_name'], invoice.company_contact_name)
self.assertEqual(res['company_contact_email'], invoice.company_contact_email)
self.assertEqual(res['internal_reference'], invoice.internal_reference)
self.assertEqual(res['customer_reference_number'], invoice.customer_reference_number)
self.assertEqual(res['invoice_number'], invoice.id)
self.assertEqual(res['created_by'], course_registration_code.created_by.username)
self.assertEqual(res['course_id'], invoice_item.course_id.to_deprecated_string())
self.assertEqual(res['total_used_codes'], used_codes)
self.assertEqual(res['total_codes'], 5)
def test_get_problem_responses_invalid_location(self):
"""
Test whether get_problem_responses returns an appropriate status
message when users submit an invalid problem location.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertEqual(res_json, 'Could not find problem with this location.')
def valid_problem_location(test): # pylint: disable=no-self-argument
"""
Decorator for tests that target get_problem_responses endpoint and
need to pretend user submitted a valid problem location.
"""
@functools.wraps(test)
def wrapper(self, *args, **kwargs):
"""
Run `test` method, ensuring that UsageKey.from_string returns a
problem key that the get_problem_responses endpoint can
work with.
"""
mock_problem_key = Mock(return_value=u'')
mock_problem_key.course_key = self.course.id
with patch.object(UsageKey, 'from_string') as patched_method:
patched_method.return_value = mock_problem_key
test(self, *args, **kwargs)
return wrapper
@valid_problem_location
def test_get_problem_responses_successful(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation was started successfully.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
problem_location = ''
response = self.client.post(url, {'problem_location': problem_location})
res_json = json.loads(response.content)
self.assertIn('status', res_json)
status = res_json['status']
self.assertIn('is being created', status)
self.assertNotIn('already in progress', status)
@valid_problem_location
def test_get_problem_responses_already_running(self):
"""
Test whether get_problem_responses returns an appropriate status
message if CSV generation is already in progress.
"""
url = reverse(
'get_problem_responses',
kwargs={'course_id': unicode(self.course.id)}
)
task_type = 'problem_responses_csv'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_calculate_problem_responses_csv') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_students_features(self):
"""
        Test that a minimum set of information is formatted
        correctly in the response to get_students_features.
"""
for student in self.students:
student.profile.city = "Mos Eisley {}".format(student.id)
student.profile.save()
url = reverse('get_students_features', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertIn('students', res_json)
for student in self.students:
student_json = [
x for x in res_json['students']
if x['username'] == student.username
][0]
self.assertEqual(student_json['username'], student.username)
self.assertEqual(student_json['email'], student.email)
self.assertEqual(student_json['city'], student.profile.city)
self.assertEqual(student_json['country'], "")
@ddt.data(True, False)
def test_get_students_features_cohorted(self, is_cohorted):
"""
Test that get_students_features includes cohort info when the course is
cohorted, and does not when the course is not cohorted.
"""
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
set_course_cohorted(self.course.id, is_cohorted)
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertEqual('cohort' in res_json['feature_names'], is_cohorted)
@ddt.data(True, False)
def test_get_students_features_teams(self, has_teams):
"""
        Test that get_students_features includes team info when the course
        has teams enabled, and does not when teams are not enabled.
"""
if has_teams:
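            # Recreate the course with a teams configuration and log in as an
            # instructor of that new course before hitting the endpoint.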
self.course = CourseFactory.create(teams_configuration={
'max_size': 2, 'topics': [{'topic-id': 'topic', 'name': 'Topic', 'description': 'A Topic'}]
})
course_instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=course_instructor.username, password='test')
url = reverse('get_students_features', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {})
res_json = json.loads(response.content)
self.assertEqual('team' in res_json['feature_names'], has_teams)
def test_get_students_who_may_enroll(self):
"""
Test whether get_students_who_may_enroll returns an appropriate
status message when users request a CSV file of students who
may enroll in a course.
"""
url = reverse(
'get_students_who_may_enroll',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# CSV generation already in progress:
task_type = 'may_enroll_info_csv'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_calculate_may_enroll_csv') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_student_exam_results(self):
"""
Test whether get_proctored_exam_results returns an appropriate
status message when users request a CSV file.
"""
url = reverse(
'get_proctored_exam_results',
kwargs={'course_id': unicode(self.course.id)}
)
# Successful case:
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# CSV generation already in progress:
task_type = 'proctored_exam_results_report'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_proctored_exam_results_report') as submit_task_function:
error = AlreadyRunningError(already_running_status)
submit_task_function.side_effect = error
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_access_course_finance_admin_with_invalid_course_key(self):
"""
        Test that require_finance_admin rejects an invalid course key
        before generating a detailed enrollment report.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'invalid_course_key')
self.assertEqual(response.status_code, 404)
self.assertFalse(func.called)
def mock_request(self):
"""
        Build a mock request whose user is the instructor.
"""
request = Mock()
request.user = self.instructor
return request
def test_access_course_finance_admin_with_valid_course_key(self):
"""
        Test the finance admin check with a valid course key when the user
        does not have the role: access is denied and the wrapped function
        is not called.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
response = decorated_func(request, 'valid/course/key')
self.assertEqual(response.status_code, 403)
self.assertFalse(func.called)
    def test_add_user_to_finance_admin_role_with_valid_course(self):
        """
        Test that the wrapped function is called once the user has been
        added to the finance admin role for a valid course.
"""
func = Mock()
decorated_func = require_finance_admin(func)
request = self.mock_request()
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
decorated_func(request, self.course.id.to_deprecated_string())
self.assertTrue(func.called)
def test_enrollment_report_features_csv(self):
"""
        Test generating the enrollment report after enrolling users and
        admin staff using registration codes.
"""
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=self.sale_invoice_1.total_amount,
status='completed',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
admin_user = AdminFactory()
admin_cart = Order.get_cart_for_user(admin_user)
PaidCourseRegistration.add_to_order(admin_cart, self.course.id)
admin_cart.purchase()
# create a new user/student and enroll
# in the course using a registration code
# and then validates the generated detailed enrollment report
test_user = UserFactory()
self.register_with_redemption_code(test_user, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
UserProfileFactory.create(user=self.students[0], meta='{"company": "asdasda"}')
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_bulk_purchase_detailed_report(self):
"""
        Test generating the detailed enrollment report.
        1. Purchase registration codes.
        2. Enroll users via registration codes.
        3. Validate the generated enrollment report.
"""
paid_course_reg_item = PaidCourseRegistration.add_to_order(self.cart, self.course.id)
# update the quantity of the cart item paid_course_reg_item
resp = self.client.post(
reverse('shoppingcart.views.update_user_cart', is_dashboard_endpoint=False),
{'ItemId': paid_course_reg_item.id, 'qty': '4'}
)
self.assertEqual(resp.status_code, 200)
# apply the coupon code to the item in the cart
resp = self.client.post(
reverse('shoppingcart.views.use_code', is_dashboard_endpoint=False),
{'code': self.coupon_code}
)
self.assertEqual(resp.status_code, 200)
self.cart.purchase()
course_reg_codes = CourseRegistrationCode.objects.filter(order=self.cart)
self.register_with_redemption_code(self.instructor, course_reg_codes[0].code)
test_user = UserFactory()
test_user_cart = Order.get_cart_for_user(test_user)
PaidCourseRegistration.add_to_order(test_user_cart, self.course.id)
test_user_cart.purchase()
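        # Record a refunded (negative amount) transaction against the invoice
        # so the report also covers the refund case.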
InvoiceTransaction.objects.create(
invoice=self.sale_invoice_1,
amount=-self.sale_invoice_1.total_amount,
status='refunded',
created_by=self.instructor,
last_modified_by=self.instructor
)
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_create_registration_code_without_invoice_and_order(self):
"""
        Test generating the detailed enrollment report using a registration
        code that was created without an invoice or order, i.e. outside the
        invoice / bulk purchase scenarios.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
def test_invoice_payment_is_still_pending_for_registration_codes(self):
"""
        Test generating the enrollment report after enrolling a user in a
        course using a registration code whose invoice has not been paid yet.
"""
course_registration_code = CourseRegistrationCode.objects.create(
code='abcde',
course_id=self.course.id.to_deprecated_string(),
created_by=self.instructor,
invoice=self.sale_invoice_1,
invoice_item=self.invoice_item,
mode_slug='honor'
)
test_user1 = UserFactory()
self.register_with_redemption_code(test_user1, course_registration_code.code)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
url = reverse('get_enrollment_report', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
self.assertIn('The detailed enrollment report is being created.', response.content)
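    # Both anonymization helpers are patched so that the generated CSV contains
    # the deterministic ids '41' and '42' for every student.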
@patch.object(lms.djangoapps.instructor.views.api, 'anonymous_id_for_user', Mock(return_value='42'))
@patch.object(lms.djangoapps.instructor.views.api, 'unique_id_for_user', Mock(return_value='41'))
def test_get_anon_ids(self):
"""
Test the CSV output for the anonymized user ids.
"""
url = reverse('get_anon_ids', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(
'"User ID","Anonymized User ID","Course Specific Anonymized User ID"'
'\n"{user_id}","41","42"\n'.format(user_id=self.students[0].id)
))
self.assertTrue(
body.endswith('"{user_id}","41","42"\n'.format(user_id=self.students[-1].id))
)
def test_list_report_downloads(self):
url = reverse('list_report_downloads', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('lms.djangoapps.instructor_task.models.DjangoStorageReportStore.links_for') as mock_links_for:
mock_links_for.return_value = [
('mock_file_name_1', 'https://1.mock.url'),
('mock_file_name_2', 'https://2.mock.url'),
]
response = self.client.post(url, {})
expected_response = {
"downloads": [
{
"url": "https://1.mock.url",
"link": "<a href=\"https://1.mock.url\">mock_file_name_1</a>",
"name": "mock_file_name_1"
},
{
"url": "https://2.mock.url",
"link": "<a href=\"https://2.mock.url\">mock_file_name_2</a>",
"name": "mock_file_name_2"
}
]
}
res_json = json.loads(response.content)
self.assertEqual(res_json, expected_response)
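    # REPORTS_DATA parametrizes several report endpoints through a single test.
    # The 'problem responses' report additionally requires a problem_location in
    # the POST data, while the remaining reports require the finance admin role,
    # hence the branch below.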
@ddt.data(*REPORTS_DATA)
@ddt.unpack
@valid_problem_location
    def test_calculate_report_csv_success(
            self,
            report_type,
            instructor_api_endpoint,
            task_api_endpoint,
            extra_instructor_api_kwargs
    ):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
success_status = "The {report_type} report is being created.".format(report_type=report_type)
if report_type == 'problem responses':
with patch(task_api_endpoint):
response = self.client.post(url, {'problem_location': ''})
self.assertIn(success_status, response.content)
else:
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.post(url, {})
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_success(
self,
report_type,
task_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
): # pylint: disable=unused-argument
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
with patch(task_api_endpoint):
response = self.client.post(url, {})
success_status = "The {report_type} report is being created." \
" To view the status of the report, see Pending" \
" Tasks below".format(report_type=report_type)
self.assertIn(success_status, response.content)
@ddt.data(*EXECUTIVE_SUMMARY_DATA)
@ddt.unpack
def test_executive_summary_report_already_running(
self,
report_type,
task_type,
instructor_api_endpoint,
task_api_endpoint,
extra_instructor_api_kwargs
):
kwargs = {'course_id': unicode(self.course.id)}
kwargs.update(extra_instructor_api_kwargs)
url = reverse(instructor_api_endpoint, kwargs=kwargs)
CourseFinanceAdminRole(self.course.id).add_users(self.instructor)
already_running_status = generate_already_running_error_message(task_type)
with patch(task_api_endpoint) as mock:
mock.side_effect = AlreadyRunningError(already_running_status)
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_ora2_responses_success(self):
url = reverse('export_ora2_data', kwargs={'course_id': unicode(self.course.id)})
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.return_value = True
response = self.client.post(url, {})
success_status = "The ORA data report is being created."
self.assertIn(success_status, response.content)
def test_get_ora2_responses_already_running(self):
url = reverse('export_ora2_data', kwargs={'course_id': unicode(self.course.id)})
task_type = 'export_ora2_data'
already_running_status = generate_already_running_error_message(task_type)
with patch('lms.djangoapps.instructor_task.api.submit_export_ora2_data') as mock_submit_ora2_task:
mock_submit_ora2_task.side_effect = AlreadyRunningError(already_running_status)
response = self.client.post(url, {})
self.assertEqual(response.status_code, 400)
self.assertIn(already_running_status, response.content)
def test_get_student_progress_url(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'unique_student_identifier': self.students[0].email.encode("utf-8")}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_from_uname(self):
""" Test that progress_url is in the successful response. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'unique_student_identifier': self.students[0].username.encode("utf-8")}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200)
res_json = json.loads(response.content)
self.assertIn('progress_url', res_json)
def test_get_student_progress_url_noparams(self):
""" Test that the endpoint 404's without the required query params. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
def test_get_student_progress_url_nostudent(self):
""" Test that the endpoint 400's when requesting an unknown email. """
url = reverse('get_student_progress_url', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url)
self.assertEqual(response.status_code, 400)
@attr(shard=1)
class TestInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can change student grades.
This includes resetting attempts and starting rescore tasks.
    This test does NOT verify whether the actions had an effect on the
    database; that is the job of the task tests and test_enrollment.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create()
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.module_to_reset = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
    def test_reset_student_attempts_delete_all(self):
        """ Make sure no one can delete all students' state on a problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_student_attempts_single(self):
""" Test reset single student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_module = StudentModule.objects.get(pk=self.module_to_reset.pk)
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(lms.djangoapps.instructor_task.api, 'submit_reset_problem_attempts_for_all_students')
def test_reset_student_attempts_all(self, act):
""" Test reset all student attempts. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_missingmodule(self):
""" Test reset for non-existant problem. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': 'robot-not-a-real-module',
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
@patch('lms.djangoapps.grades.signals.handlers.PROBLEM_WEIGHTED_SCORE_CHANGED.send')
def test_reset_student_attempts_delete(self, _mock_signal):
""" Test delete single student state. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
self.assertEqual(
StudentModule.objects.filter(
student=self.module_to_reset.student,
course_id=self.module_to_reset.course_id,
# module_id=self.module_to_reset.module_id,
).count(),
0
)
def test_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_student')
def test_rescore_problem_single_from_uname(self, act):
""" Test rescoring of a single student. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'unique_student_identifier': self.student.username,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_problem_for_all_students')
def test_rescore_problem_all(self, act):
""" Test rescoring for all students. """
url = reverse('rescore_problem', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'problem_to_reset': self.problem_urlname,
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_course_has_entrance_exam_in_student_attempts_reset(self):
""" Test course has entrance exam id set while resetting attempts"""
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'delete_module': False,
})
self.assertEqual(response.status_code, 400)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test course has entrance exam id set while re-scoring. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
@attr(shard=1)
@patch.dict(settings.FEATURES, {'ENTRANCE_EXAMS': True})
@ddt.ddt
class TestEntranceExamInstructorAPIRegradeTask(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test endpoints whereby instructors can rescore student grades,
reset student attempts and delete state for entrance exam.
"""
@classmethod
def setUpClass(cls):
super(TestEntranceExamInstructorAPIRegradeTask, cls).setUpClass()
cls.course = CourseFactory.create(
org='test_org',
course='test_course',
run='test_run',
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.course_with_invalid_ee = CourseFactory.create(entrance_exam_id='invalid_exam')
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.entrance_exam = ItemFactory.create(
parent=cls.course,
category='chapter',
display_name='Entrance exam'
)
subsection = ItemFactory.create(
parent=cls.entrance_exam,
category='sequential',
display_name='Subsection 1'
)
vertical = ItemFactory.create(
parent=subsection,
category='vertical',
display_name='Vertical 1'
)
cls.ee_problem_1 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 1"
)
cls.ee_problem_2 = ItemFactory.create(
parent=vertical,
category="problem",
display_name="Exam Problem - Problem 2"
)
def setUp(self):
super(TestEntranceExamInstructorAPIRegradeTask, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
# Add instructor to invalid ee course
CourseInstructorRole(self.course_with_invalid_ee.id).add_users(self.instructor)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
ee_module_to_reset1 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_1.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
ee_module_to_reset2 = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.ee_problem_2.location,
state=json.dumps({'attempts': 10, 'done': True}),
)
self.ee_modules = [ee_module_to_reset1.module_state_key, ee_module_to_reset2.module_state_key]
@ddt.data(ModuleStoreEnum.Type.split, ModuleStoreEnum.Type.mongo)
def test_grade_histogram(self, store):
"""
        Verify that the grade histogram is computed correctly.
"""
course = CourseFactory.create(default_store=store)
usage_key = course.id.make_usage_key('problem', 'first_problem')
StudentModule.objects.create(
student_id=1,
grade=100,
module_state_key=usage_key
)
StudentModule.objects.create(
student_id=2,
grade=50,
module_state_key=usage_key
)
grades = grade_histogram(usage_key)
self.assertEqual(grades[0], (50.0, 1))
self.assertEqual(grades[1], (100.0, 1))
def test_reset_entrance_exam_student_attempts_delete_all(self):
""" Make sure no one can delete all students state on entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'delete_module': True,
})
self.assertEqual(response.status_code, 400)
def test_reset_entrance_exam_student_attempts_single(self):
""" Test reset single student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# make sure problem attempts have been reset.
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
for changed_module in changed_modules:
self.assertEqual(
json.loads(changed_module.state)['attempts'],
0
)
# mock out the function which should be called to execute the action.
@patch.object(lms.djangoapps.instructor_task.api, 'submit_reset_problem_attempts_in_entrance_exam')
def test_reset_entrance_exam_all_student_attempts(self, act):
""" Test reset all student attempts for entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_reset_student_attempts_invalid_entrance_exam(self):
""" Test reset for invalid entrance exam. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_entrance_exam_student_delete_state(self):
""" Test delete single student entrance exam state. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 200)
# make sure the module has been deleted
changed_modules = StudentModule.objects.filter(module_state_key__in=self.ee_modules)
self.assertEqual(changed_modules.count(), 0)
def test_entrance_exam_delete_state_with_staff(self):
""" Test entrance exam delete state failure with staff access. """
self.client.logout()
staff_user = StaffFactory(course_key=self.course.id)
self.client.login(username=staff_user.username, password='test')
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'delete_module': True,
})
self.assertEqual(response.status_code, 403)
def test_entrance_exam_reset_student_attempts_nonsense(self):
""" Test failure with both unique_student_identifier and all_students. """
url = reverse('reset_student_attempts_for_entrance_exam',
kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
@patch.object(lms.djangoapps.instructor_task.api, 'submit_rescore_entrance_exam_for_student')
def test_rescore_entrance_exam_single_student(self, act):
""" Test re-scoring of entrance exam for single student. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
self.assertTrue(act.called)
def test_rescore_entrance_exam_all_student(self):
""" Test rescoring for all students. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_if_higher_all_student(self):
""" Test rescoring for all students only if higher. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'all_students': True,
'only_if_higher': True,
})
self.assertEqual(response.status_code, 200)
def test_rescore_entrance_exam_all_student_and_single(self):
""" Test re-scoring with both all students and single student parameters. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
'all_students': True,
})
self.assertEqual(response.status_code, 400)
def test_rescore_entrance_exam_with_invalid_exam(self):
""" Test re-scoring of entrance exam with invalid exam. """
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_list_entrance_exam_instructor_tasks_student(self):
""" Test list task history for entrance exam AND student. """
# create a re-score entrance exam task
url = reverse('rescore_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 1)
self.assertEqual(tasks[0]['status'], _('Complete'))
def test_list_entrance_exam_instructor_tasks_all_student(self):
""" Test list task history for entrance exam AND all student. """
url = reverse('list_entrance_exam_instructor_tasks', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# check response
tasks = json.loads(response.content)['tasks']
self.assertEqual(len(tasks), 0)
def test_list_entrance_exam_instructor_with_invalid_exam_key(self):
""" Test list task history for entrance exam failure if course has invalid exam. """
url = reverse('list_entrance_exam_instructor_tasks',
kwargs={'course_id': unicode(self.course_with_invalid_ee.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 400)
def test_skip_entrance_exam_student(self):
""" Test skip entrance exam api for student. """
        # allow the student to skip the entrance exam
url = reverse('mark_student_can_skip_entrance_exam', kwargs={'course_id': unicode(self.course.id)})
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
message = _('This student (%s) will skip the entrance exam.') % self.student.email
self.assertContains(response, message)
# post again with same student
response = self.client.post(url, {
'unique_student_identifier': self.student.email,
})
# This time response message should be different
message = _('This student (%s) is already allowed to skip the entrance exam.') % self.student.email
self.assertContains(response, message)
@attr(shard=1)
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message', autospec=True))
class TestInstructorSendEmail(SiteMixin, SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Checks that only instructors have access to email endpoints, and that
    these endpoints are only accessible for courses that actually exist,
    and only with valid email messages.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorSendEmail, cls).setUpClass()
cls.course = CourseFactory.create()
test_subject = u'\u1234 test subject'
test_message = u'\u6824 test message'
cls.full_test_message = {
'send_to': '["myself", "staff"]',
'subject': test_subject,
'message': test_message,
}
BulkEmailFlag.objects.create(enabled=True, require_course_email_auth=False)
@classmethod
def tearDownClass(cls):
super(TestInstructorSendEmail, cls).tearDownClass()
BulkEmailFlag.objects.all().delete()
def setUp(self):
super(TestInstructorSendEmail, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_send_email_as_logged_in_instructor(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
def test_send_email_but_not_logged_in(self):
self.client.logout()
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_not_staff(self):
self.client.logout()
student = UserFactory()
self.client.login(username=student.username, password='test')
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 403)
def test_send_email_but_course_not_exist(self):
url = reverse('send_email', kwargs={'course_id': 'GarbageCourse/DNE/NoTerm'})
response = self.client.post(url, self.full_test_message)
self.assertNotEqual(response.status_code, 200)
def test_send_email_no_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_invalid_sendto(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': '["invalid_target", "staff"]',
'subject': 'test subject',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_subject(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': '["staff"]',
'message': 'test message',
})
self.assertEqual(response.status_code, 400)
def test_send_email_no_message(self):
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'send_to': '["staff"]',
'subject': 'test subject',
})
self.assertEqual(response.status_code, 400)
def test_send_email_with_site_template_and_from_addr(self):
site_email = self.site_configuration.values.get('course_email_from_addr')
site_template = self.site_configuration.values.get('course_email_template_name')
CourseEmailTemplate.objects.create(name=site_template)
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
self.assertEqual(1, CourseEmail.objects.filter(
course_id=self.course.id,
sender=self.instructor,
subject=self.full_test_message['subject'],
html_message=self.full_test_message['message'],
template_name=site_template,
from_addr=site_email
).count())
def test_send_email_with_org_template_and_from_addr(self):
org_email = 'fake_org@example.com'
org_template = 'fake_org_email_template'
CourseEmailTemplate.objects.create(name=org_template)
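        # Per-org overrides for the from address and template name are stored
        # as dicts keyed by the course org.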
self.site_configuration.values.update({
'course_email_from_addr': {self.course.id.org: org_email},
'course_email_template_name': {self.course.id.org: org_template}
})
self.site_configuration.save()
url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, self.full_test_message)
self.assertEqual(response.status_code, 200)
self.assertEqual(1, CourseEmail.objects.filter(
course_id=self.course.id,
sender=self.instructor,
subject=self.full_test_message['subject'],
html_message=self.full_test_message['message'],
template_name=org_template,
from_addr=org_email
).count())
class MockCompletionInfo(object):
"""Mock for get_task_completion_info"""
times_called = 0
def mock_get_task_completion_info(self, *args): # pylint: disable=unused-argument
"""Mock for get_task_completion_info"""
self.times_called += 1
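        # Alternate between an errored and a completed result on successive
        # calls (the first call reports an error).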
if self.times_called % 2 == 0:
return True, 'Task Completed'
return False, 'Task Errored In Some Way'
@attr(shard=1)
class TestInstructorAPITaskLists(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test instructor task list endpoint.
"""
class FakeTask(object):
""" Fake task object """
FEATURES = [
'task_type',
'task_input',
'task_id',
'requester',
'task_state',
'created',
'status',
'task_message',
'duration_sec'
]
def __init__(self, completion):
for feature in self.FEATURES:
setattr(self, feature, 'expected')
# created needs to be a datetime
self.created = datetime.datetime(2013, 10, 25, 11, 42, 35)
# set 'status' and 'task_message' attrs
success, task_message = completion()
if success:
self.status = "Complete"
else:
self.status = "Incomplete"
self.task_message = task_message
# Set 'task_output' attr, which will be parsed to the 'duration_sec' attr.
self.task_output = '{"duration_ms": 1035000}'
self.duration_sec = 1035000 / 1000.0
def make_invalid_output(self):
"""Munge task_output to be invalid json"""
self.task_output = 'HI MY NAME IS INVALID JSON'
# This should be given the value of 'unknown' if the task output
# can't be properly parsed
self.duration_sec = 'unknown'
def to_dict(self):
""" Convert fake task to dictionary representation. """
attr_dict = {key: getattr(self, key) for key in self.FEATURES}
attr_dict['created'] = attr_dict['created'].isoformat()
return attr_dict
@classmethod
def setUpClass(cls):
super(TestInstructorAPITaskLists, cls).setUpClass()
cls.course = CourseFactory.create(
entrance_exam_id='i4x://{}/{}/chapter/Entrance_exam'.format('test_org', 'test_course')
)
cls.problem_location = msk_from_problem_urlname(
cls.course.id,
'robot-some-problem-urlname'
)
cls.problem_urlname = cls.problem_location.to_deprecated_string()
def setUp(self):
super(TestInstructorAPITaskLists, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.student = UserFactory()
CourseEnrollment.enroll(self.student, self.course.id)
self.module = StudentModule.objects.create(
student=self.student,
course_id=self.course.id,
module_state_key=self.problem_location,
state=json.dumps({'attempts': 10}),
)
mock_factory = MockCompletionInfo()
self.tasks = [self.FakeTask(mock_factory.mock_get_task_completion_info) for _ in xrange(7)]
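        # Give the last fake task invalid JSON output so the list endpoints
        # exercise the 'unknown' duration fallback.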
self.tasks[-1].make_invalid_output()
@patch.object(lms.djangoapps.instructor_task.api, 'get_running_instructor_tasks')
def test_list_instructor_tasks_running(self, act):
""" Test list of all running tasks. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
def test_list_background_email_tasks(self, act):
"""Test list of background email tasks."""
act.return_value = self.tasks
url = reverse('list_background_email_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem(self, act):
""" Test list task history for problem. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {
'problem_location_str': self.problem_urlname,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history')
def test_list_instructor_tasks_problem_student(self, act):
""" Test list task history for problem AND student. """
act.return_value = self.tasks
url = reverse('list_instructor_tasks', kwargs={'course_id': self.course.id.to_deprecated_string()})
mock_factory = MockCompletionInfo()
with patch(
'lms.djangoapps.instructor.views.instructor_task_helpers.get_task_completion_info'
) as mock_completion_info:
mock_completion_info.side_effect = mock_factory.mock_get_task_completion_info
response = self.client.post(url, {
'problem_location_str': self.problem_urlname,
'unique_student_identifier': self.student.email,
})
self.assertEqual(response.status_code, 200)
# check response
self.assertTrue(act.called)
expected_tasks = [ftask.to_dict() for ftask in self.tasks]
actual_tasks = json.loads(response.content)['tasks']
for exp_task, act_task in zip(expected_tasks, actual_tasks):
self.assertDictEqual(exp_task, act_task)
self.assertEqual(actual_tasks, expected_tasks)
@attr(shard=1)
@patch.object(lms.djangoapps.instructor_task.api, 'get_instructor_task_history', autospec=True)
class TestInstructorEmailContentList(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
Test the instructor email content history endpoint.
"""
@classmethod
def setUpClass(cls):
super(TestInstructorEmailContentList, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestInstructorEmailContentList, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
self.tasks = {}
self.emails = {}
self.emails_info = {}
def setup_fake_email_info(self, num_emails, with_failures=False):
""" Initialize the specified number of fake emails """
for email_id in range(num_emails):
num_sent = random.randint(1, 15401)
if with_failures:
failed = random.randint(1, 15401)
else:
failed = 0
self.tasks[email_id] = FakeContentTask(email_id, num_sent, failed, 'expected')
self.emails[email_id] = FakeEmail(email_id)
self.emails_info[email_id] = FakeEmailInfo(self.emails[email_id], num_sent, failed)
def get_matching_mock_email(self, **kwargs):
""" Returns the matching mock emails for the given id """
email_id = kwargs.get('id', 0)
return self.emails[email_id]
def get_email_content_response(self, num_emails, task_history_request, with_failures=False):
""" Calls the list_email_content endpoint and returns the repsonse """
self.setup_fake_email_info(num_emails, with_failures)
task_history_request.return_value = self.tasks.values()
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.side_effect = self.get_matching_mock_email
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
return response
def check_emails_sent(self, num_emails, task_history_request, with_failures=False):
""" Tests sending emails with or without failures """
response = self.get_email_content_response(num_emails, task_history_request, with_failures)
self.assertTrue(task_history_request.called)
expected_email_info = [email_info.to_dict() for email_info in self.emails_info.values()]
actual_email_info = json.loads(response.content)['emails']
self.assertEqual(len(actual_email_info), num_emails)
for exp_email, act_email in zip(expected_email_info, actual_email_info):
self.assertDictEqual(exp_email, act_email)
self.assertEqual(expected_email_info, actual_email_info)
def test_content_list_one_email(self, task_history_request):
""" Test listing of bulk emails when email list has one email """
response = self.get_email_content_response(1, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should have one email
self.assertEqual(len(email_info), 1)
# Email content should be what's expected
expected_message = self.emails[0].html_message
returned_email_info = email_info[0]
received_message = returned_email_info[u'email'][u'html_message']
self.assertEqual(expected_message, received_message)
def test_content_list_no_emails(self, task_history_request):
""" Test listing of bulk emails when email list empty """
response = self.get_email_content_response(0, task_history_request)
self.assertTrue(task_history_request.called)
email_info = json.loads(response.content)['emails']
# Emails list should be empty
self.assertEqual(len(email_info), 0)
def test_content_list_email_content_many(self, task_history_request):
""" Test listing of bulk emails sent large amount of emails """
self.check_emails_sent(50, task_history_request)
def test_list_email_content_error(self, task_history_request):
""" Test handling of error retrieving email """
invalid_task = FakeContentTask(0, 0, 0, 'test')
invalid_task.make_invalid_input()
task_history_request.return_value = [invalid_task]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_email_info = json.loads(response.content)['emails']
self.assertEqual(len(returned_email_info), 1)
returned_info = returned_email_info[0]
for info in ['created', 'sent_to', 'email', 'number_sent', 'requester']:
self.assertEqual(returned_info[info], None)
def test_list_email_with_failure(self, task_history_request):
""" Test the handling of email task that had failures """
self.check_emails_sent(1, task_history_request, True)
def test_list_many_emails_with_failures(self, task_history_request):
""" Test the handling of many emails with failures """
self.check_emails_sent(50, task_history_request, True)
def test_list_email_with_no_successes(self, task_history_request):
task_info = FakeContentTask(0, 0, 10, 'expected')
email = FakeEmail(0)
email_info = FakeEmailInfo(email, 0, 10)
task_history_request.return_value = [task_info]
url = reverse('list_email_content', kwargs={'course_id': self.course.id.to_deprecated_string()})
with patch('lms.djangoapps.instructor.views.api.CourseEmail.objects.get') as mock_email_info:
mock_email_info.return_value = email
response = self.client.post(url, {})
self.assertEqual(response.status_code, 200)
self.assertTrue(task_history_request.called)
returned_info_list = json.loads(response.content)['emails']
self.assertEqual(len(returned_info_list), 1)
returned_info = returned_info_list[0]
expected_info = email_info.to_dict()
self.assertDictEqual(expected_info, returned_info)
@attr(shard=1)
class TestInstructorAPIHelpers(TestCase):
""" Test helpers for instructor.api """
def test_split_input_list(self):
strings = []
lists = []
strings.append(
"Lorem@ipsum.dolor, sit@amet.consectetur\nadipiscing@elit.Aenean\r convallis@at.lacus\r, ut@lacinia.Sed")
lists.append(['Lorem@ipsum.dolor', 'sit@amet.consectetur', 'adipiscing@elit.Aenean', 'convallis@at.lacus',
'ut@lacinia.Sed'])
for (stng, lst) in zip(strings, lists):
self.assertEqual(_split_input_list(stng), lst)
def test_split_input_list_unicode(self):
self.assertEqual(_split_input_list('robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
['robot@robot.edu', 'robot2@robot.edu'])
self.assertEqual(_split_input_list(u'robot@robot.edu, robot2@robot.edu'),
[u'robot@robot.edu', 'robot2@robot.edu'])
scary_unistuff = unichr(40960) + u'abcd' + unichr(1972)
self.assertEqual(_split_input_list(scary_unistuff), [scary_unistuff])
def test_msk_from_problem_urlname(self):
course_id = SlashSeparatedCourseKey('MITx', '6.002x', '2013_Spring')
name = 'L2Node1'
output = 'i4x://MITx/6.002x/problem/L2Node1'
self.assertEqual(msk_from_problem_urlname(course_id, name).to_deprecated_string(), output)
@raises(ValueError)
def test_msk_from_problem_urlname_error(self):
args = ('notagoodcourse', 'L2Node1')
msk_from_problem_urlname(*args)
def get_extended_due(course, unit, user):
"""
Gets the overridden due date for the given user on the given unit. Returns
`None` if there is no override set.
"""
try:
override = StudentFieldOverride.objects.get(
course_id=course.id,
student=user,
location=unit.location,
field='due'
)
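        # The override value is stored as a JSON-encoded string; decode it and
        # convert it back into a datetime via the date field's deserializer.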
return DATE_FIELD.from_json(json.loads(override.value))
except StudentFieldOverride.DoesNotExist:
return None
@attr(shard=1)
class TestDueDateExtensions(SharedModuleStoreTestCase, LoginEnrollmentTestCase):
"""
    Test due date extensions.
"""
@classmethod
def setUpClass(cls):
super(TestDueDateExtensions, cls).setUpClass()
cls.course = CourseFactory.create()
cls.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
with cls.store.bulk_operations(cls.course.id, emit_signals=False):
cls.week1 = ItemFactory.create(due=cls.due)
cls.week2 = ItemFactory.create(due=cls.due)
cls.week3 = ItemFactory.create() # No due date
cls.course.children = [
cls.week1.location.to_deprecated_string(),
cls.week2.location.to_deprecated_string(),
cls.week3.location.to_deprecated_string()
]
cls.homework = ItemFactory.create(
parent_location=cls.week1.location,
due=cls.due
)
cls.week1.children = [cls.homework.location.to_deprecated_string()]
def setUp(self):
"""
Fixtures.
"""
super(TestDueDateExtensions, self).setUp()
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_change_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
def test_change_to_invalid_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '01/01/2009 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_change_nonexistent_due_date(self):
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'student': self.user1.username,
'url': self.week3.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 400, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week3, self.user1)
)
def test_reset_date(self):
self.test_change_due_date()
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
def test_reset_nonexistent_extension(self):
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 400, response.content)
def test_show_unit_extensions(self):
self.test_change_due_date()
url = reverse('show_unit_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'url': self.week1.location.to_deprecated_string()})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Full Name': self.user1.profile.name,
u'Username': self.user1.username}],
u'header': [u'Username', u'Full Name', u'Extended Due Date'],
u'title': u'Users with due date extensions for %s' %
self.week1.display_name})
def test_show_student_extensions(self):
self.test_change_due_date()
url = reverse('show_student_extensions',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {'student': self.user1.username})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(json.loads(response.content), {
u'data': [{u'Extended Due Date': u'2013-12-30 00:00',
u'Unit': self.week1.display_name}],
u'header': [u'Unit', u'Extended Due Date'],
u'title': u'Due date extensions for %s (%s)' % (
self.user1.profile.name, self.user1.username)})
@attr(shard=1)
class TestDueDateExtensionsDeletedDate(ModuleStoreTestCase, LoginEnrollmentTestCase):
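    """
    Test due date extensions when the unit's own due date has been deleted.
    """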
def setUp(self):
"""
Fixtures.
"""
super(TestDueDateExtensionsDeletedDate, self).setUp()
self.course = CourseFactory.create()
self.due = datetime.datetime(2010, 5, 12, 2, 42, tzinfo=utc)
with self.store.bulk_operations(self.course.id, emit_signals=False):
self.week1 = ItemFactory.create(due=self.due)
self.week2 = ItemFactory.create(due=self.due)
self.week3 = ItemFactory.create() # No due date
self.course.children = [
self.week1.location.to_deprecated_string(),
self.week2.location.to_deprecated_string(),
self.week3.location.to_deprecated_string()
]
self.homework = ItemFactory.create(
parent_location=self.week1.location,
due=self.due
)
self.week1.children = [self.homework.location.to_deprecated_string()]
user1 = UserFactory.create()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week2.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.week3.location).save()
StudentModule(
state='{}',
student_id=user1.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user2 = UserFactory.create()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user2.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
user3 = UserFactory.create()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.week1.location).save()
StudentModule(
state='{}',
student_id=user3.id,
course_id=self.course.id,
module_state_key=self.homework.location).save()
self.user1 = user1
self.user2 = user2
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def test_reset_extension_to_deleted_date(self):
"""
Test that we can delete a due date extension after deleting the normal
due date, without causing an error.
"""
url = reverse('change_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
'due_datetime': '12/30/2013 00:00'
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(datetime.datetime(2013, 12, 30, 0, 0, tzinfo=utc),
get_extended_due(self.course, self.week1, self.user1))
self.week1.due = None
self.week1 = self.store.update_item(self.week1, self.user1.id)
# Now, week1's normal due date is deleted but the extension still exists.
url = reverse('reset_due_date', kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url, {
'student': self.user1.username,
'url': self.week1.location.to_deprecated_string(),
})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(
None,
get_extended_due(self.course, self.week1, self.user1)
)
@attr(shard=1)
class TestCourseIssuedCertificatesData(SharedModuleStoreTestCase):
"""
Test data dumps for issued certificates.
"""
@classmethod
def setUpClass(cls):
super(TestCourseIssuedCertificatesData, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestCourseIssuedCertificatesData, self).setUp()
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
def generate_certificate(self, course_id, mode, status):
"""
Generate test certificate
"""
test_user = UserFactory()
GeneratedCertificateFactory.create(
user=test_user,
course_id=course_id,
mode=mode,
status=status
)
def test_certificates_features_against_status(self):
"""
Test that only certificates with status 'downloadable' are returned in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
# first generate certificates with 'honor' mode and the non-downloadable 'generating' status
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.generating)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 0)
# Certificates with status 'downloadable' should be in response.
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
def test_certificates_features_group_by_mode(self):
"""
Test for certificate csv features against mode. Certificates should be grouped by 'mode' in the response.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
# firstly generating downloadable certificates with 'honor' mode
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
self.assertEqual(len(res_json['certificates']), 1)
# retrieve the first certificate from the list, there should be 3 certificates for 'honor' mode.
certificate = res_json['certificates'][0]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'honor')
self.assertEqual(certificate.get('course_id'), str(self.course.id))
# Now generating downloadable certificates with 'verified' mode
for __ in xrange(certificate_count):
self.generate_certificate(
course_id=self.course.id,
mode='verified',
status=CertificateStatuses.downloadable
)
response = self.client.post(url)
res_json = json.loads(response.content)
self.assertIn('certificates', res_json)
# there should now be 2 certificate groups in the response: 'honor' and 'verified'.
self.assertEqual(len(res_json['certificates']), 2)
# retrieve the second certificate from the list
certificate = res_json['certificates'][1]
self.assertEqual(certificate.get('total_issued_certificate'), 3)
self.assertEqual(certificate.get('mode'), 'verified')
def test_certificates_features_csv(self):
"""
Test for certificate csv features.
"""
url = reverse('get_issued_certificates', kwargs={'course_id': unicode(self.course.id)})
# firstly generating downloadable certificates with 'honor' mode
certificate_count = 3
for __ in xrange(certificate_count):
self.generate_certificate(course_id=self.course.id, mode='honor', status=CertificateStatuses.downloadable)
current_date = datetime.date.today().strftime("%B %d, %Y")
response = self.client.get(url, {'csv': 'true'})
self.assertEqual(response['Content-Type'], 'text/csv')
self.assertEqual(response['Content-Disposition'], 'attachment; filename={0}'.format('issued_certificates.csv'))
self.assertEqual(
response.content.strip(),
'"CourseID","Certificate Type","Total Certificates Issued","Date Report Run"\r\n"'
+ str(self.course.id) + '","honor","3","' + current_date + '"'
)
@attr(shard=1)
@override_settings(REGISTRATION_CODE_LENGTH=8)
class TestCourseRegistrationCodes(SharedModuleStoreTestCase):
"""
Test data dumps for E-commerce Course Registration Codes.
"""
@classmethod
def setUpClass(cls):
super(TestCourseRegistrationCodes, cls).setUpClass()
cls.course = CourseFactory.create()
cls.url = reverse(
'generate_registration_codes',
kwargs={'course_id': cls.course.id.to_deprecated_string()}
)
def setUp(self):
"""
Fixtures.
"""
super(TestCourseRegistrationCodes, self).setUp()
CourseModeFactory.create(course_id=self.course.id, min_price=50)
self.instructor = InstructorFactory(course_key=self.course.id)
self.client.login(username=self.instructor.username, password='test')
CourseSalesAdminRole(self.course.id).add_users(self.instructor)
data = {
'total_registration_codes': 12, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street',
'address_line_2': '', 'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(self.url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(5):
order = Order(user=self.instructor, status='purchased')
order.save()
# Spent(used) Registration Codes
for i in range(5):
i += 1
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
@override_settings(FINANCE_EMAIL='finance@example.com')
def test_finance_email_in_recipient_list_when_generating_registration_codes(self):
"""
Test to verify that the invoice will also be sent to the FINANCE_EMAIL when
generating registration codes
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# check the last message in mail.outbox; the FINANCE_EMAIL is appended at the
# very end of the recipient list when generating registration codes
self.assertEqual(mail.outbox[-1].to[0], 'finance@example.com')
def test_user_invoice_copy_preference(self):
"""
Test to remember user invoice copy preference
"""
url_reg_code = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 5, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 121.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': 'True'
}
# the user invoice copy preference will be saved in the api user preference model
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], True)
# updating the user invoice copy preference during code generation flow
data['invoice'] = ''
response = self.client.post(url_reg_code, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
# get user invoice copy preference.
url_user_invoice_preference = reverse('get_user_invoice_preference',
kwargs={'course_id': self.course.id.to_deprecated_string()})
response = self.client.post(url_user_invoice_preference, data)
result = json.loads(response.content)
self.assertEqual(result['invoice_copy'], False)
def test_generate_course_registration_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
def test_generate_course_registration_with_redeem_url_codes_csv(self):
"""
Test to generate a response of all the generated course registration codes
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 15, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 17)
rows = body.split('\n')
index = 1
while index < len(rows):
if rows[index]:
row_data = rows[index].split(',')
code = row_data[0].replace('"', '')
self.assertTrue(row_data[1].startswith('"http')
and row_data[1].endswith('/shoppingcart/register/redeem/{0}/"'.format(code)))
index += 1
@patch.object(lms.djangoapps.instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'second', 'third', 'fourth']))
def test_generate_course_registration_codes_matching_existing_coupon_code(self):
"""
Test that code generation skips a generated code that already exists in the Coupon table.
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
coupon = Coupon(code='first', course_id=self.course.id.to_deprecated_string(), created_by=self.instructor)
coupon.save()
data = {
'total_registration_codes': 3, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 5) # 1 for headers, 1 for new line at the end and 3 for the actual data
@patch.object(lms.djangoapps.instructor.views.api, 'random_code_generator',
Mock(side_effect=['first', 'first', 'second', 'third']))
def test_generate_course_registration_codes_integrity_error(self):
"""
Test for the Integrity error against the generated code
"""
url = reverse('generate_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {
'total_registration_codes': 2, 'company_name': 'Test Group', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 4)
def test_spent_course_registration_codes_csv(self):
"""
Test to generate a response of all the spent course registration codes
"""
url = reverse('spent_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'spent_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 7)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'unit_price': 122.45, 'company_contact_email': 'Test@company.com', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
for i in range(9):
order = Order(user=self.instructor, status='purchased')
order.save()
# Spent(used) Registration Codes
for i in range(9):
i += 13
registration_code_redemption = RegistrationCodeRedemption(
registration_code_id=i,
redeemed_by=self.instructor
)
registration_code_redemption.save()
data = {'spent_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_active_course_registration_codes_csv(self):
"""
Test to generate a response of all the active course registration codes
"""
url = reverse('active_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'active_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 9)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'active_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_get_all_course_registration_codes_csv(self):
"""
Test to generate a response of all the course registration codes
"""
url = reverse(
'get_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {'download_company_name': ''}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 14)
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
data = {'download_company_name': 'Group Alpha'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
self.assertEqual(len(body.split('\n')), 11)
def test_pdf_file_throws_exception(self):
"""
Test that registration code generation still succeeds when the pdf
invoice generation is mocked to throw an exception.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 9, 'company_name': 'Group Alpha', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': ''
}
with patch.object(PDFInvoice, 'generate_pdf', side_effect=Exception):
response = self.client.post(generate_code_url, data)
self.assertEqual(response.status_code, 200, response.content)
def test_get_codes_with_sale_invoice(self):
"""
Test retrieving registration codes that were generated with a sale invoice.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 5.5, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 122.45, 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 200, response.content)
url = reverse('get_registration_codes',
kwargs={'course_id': self.course.id.to_deprecated_string()})
data = {'download_company_name': 'Group Invoice'}
response = self.client.post(url, data)
self.assertEqual(response.status_code, 200, response.content)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_CSV_HEADER))
def test_with_invalid_unit_price(self):
"""
Test that generating registration codes with an invalid unit price returns a 400 error.
"""
generate_code_url = reverse(
'generate_registration_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
data = {
'total_registration_codes': 10, 'company_name': 'Group Invoice', 'company_contact_name': 'Test@company.com',
'company_contact_email': 'Test@company.com', 'unit_price': 'invalid', 'recipient_name': 'Test123',
'recipient_email': 'test@123.com', 'address_line_1': 'Portland Street', 'address_line_2': '',
'address_line_3': '', 'city': '', 'state': '', 'zip': '', 'country': '',
'customer_reference_number': '123A23F', 'internal_reference': '', 'invoice': True
}
response = self.client.post(generate_code_url, data, **{'HTTP_HOST': 'localhost'})
self.assertEqual(response.status_code, 400, response.content)
self.assertIn('Could not parse amount as', response.content)
def test_get_historical_coupon_codes(self):
"""
Test to download a response of all the active coupon codes
"""
get_coupon_code_url = reverse(
'get_coupon_codes', kwargs={'course_id': self.course.id.to_deprecated_string()}
)
for i in range(10):
coupon = Coupon(
code='test_code{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True
)
coupon.save()
# now create coupons with expiration dates
for i in range(5):
coupon = Coupon(
code='coupon{0}'.format(i), description='test_description', course_id=self.course.id,
percentage_discount='{0}'.format(i), created_by=self.instructor, is_active=True,
expiration_date=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=2)
)
coupon.save()
response = self.client.post(get_coupon_code_url)
self.assertEqual(response.status_code, 200, response.content)
# filter all the coupons
for coupon in Coupon.objects.all():
self.assertIn(
'"{coupon_code}","{course_id}","{discount}","{description}","{expiration_date}","{is_active}",'
'"{code_redeemed_count}","{total_discounted_seats}","{total_discounted_amount}"'.format(
coupon_code=coupon.code,
course_id=coupon.course_id,
discount=coupon.percentage_discount,
description=coupon.description,
expiration_date=coupon.display_expiry_date,
is_active=coupon.is_active,
code_redeemed_count="0",
total_discounted_seats="0",
total_discounted_amount="0",
), response.content
)
self.assertEqual(response['Content-Type'], 'text/csv')
body = response.content.replace('\r', '')
self.assertTrue(body.startswith(EXPECTED_COUPON_CSV_HEADER))
@attr(shard=1)
class TestBulkCohorting(SharedModuleStoreTestCase):
"""
Test adding users to cohorts in bulk via CSV upload.
"""
@classmethod
def setUpClass(cls):
super(TestBulkCohorting, cls).setUpClass()
cls.course = CourseFactory.create()
def setUp(self):
super(TestBulkCohorting, self).setUp()
self.staff_user = StaffFactory(course_key=self.course.id)
self.non_staff_user = UserFactory.create()
self.tempdir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tempdir)
def call_add_users_to_cohorts(self, csv_data, suffix='.csv'):
"""
Call `add_users_to_cohorts` with a file generated from `csv_data`.
"""
# this temporary file will be removed in `self.tearDown()`
__, file_name = tempfile.mkstemp(suffix=suffix, dir=self.tempdir)
with open(file_name, 'w') as file_pointer:
file_pointer.write(csv_data.encode('utf-8'))
with open(file_name, 'r') as file_pointer:
url = reverse('add_users_to_cohorts', kwargs={'course_id': unicode(self.course.id)})
return self.client.post(url, {'uploaded-file': file_pointer})
def expect_error_on_file_content(self, file_content, error, file_suffix='.csv'):
"""
Verify that we get the error we expect for a given file input.
"""
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content, suffix=file_suffix)
self.assertEqual(response.status_code, 400)
result = json.loads(response.content)
self.assertEqual(result['error'], error)
def verify_success_on_file_content(self, file_content, mock_store_upload, mock_cohort_task):
"""
Verify that `add_users_to_cohorts` successfully validates the
file content, uploads the input file, and triggers the
background task.
"""
mock_store_upload.return_value = (None, 'fake_file_name.csv')
self.client.login(username=self.staff_user.username, password='test')
response = self.call_add_users_to_cohorts(file_content)
self.assertEqual(response.status_code, 204)
self.assertTrue(mock_store_upload.called)
self.assertTrue(mock_cohort_task.called)
def test_no_cohort_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a cohort field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'username,email\n', "The file must contain a 'cohort' column containing cohort names."
)
def test_no_username_or_email_field(self):
"""
Verify that we get a descriptive verification error when we haven't
included a username or email field in the uploaded CSV.
"""
self.expect_error_on_file_content(
'cohort\n', "The file must contain a 'username' column, an 'email' column, or both."
)
def test_empty_csv(self):
"""
Verify that we get a descriptive verification error when we haven't
included any data in the uploaded CSV.
"""
self.expect_error_on_file_content(
'', "The file must contain a 'cohort' column containing cohort names."
)
def test_wrong_extension(self):
"""
Verify that we get a descriptive verification error when we haven't
uploaded a file with a '.csv' extension.
"""
self.expect_error_on_file_content(
'', "The file must end with the extension '.csv'.", file_suffix='.notcsv'
)
def test_non_staff_no_access(self):
"""
Verify that we can't access the view when we aren't a staff user.
"""
self.client.login(username=self.non_staff_user.username, password='test')
response = self.call_add_users_to_cohorts('')
self.assertEqual(response.status_code, 403)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_username(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call a background task when
the CSV has username and cohort columns.
"""
self.verify_success_on_file_content(
'username,cohort\nfoo_username,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has email and cohort columns.
"""
self.verify_success_on_file_content(
'email,cohort\nfoo_email,bar_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_username_and_email(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when the CSV has username, email and cohort columns.
"""
self.verify_success_on_file_content(
'username,email,cohort\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_carriage_return(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns.
"""
self.verify_success_on_file_content(
'username,email,cohort\rfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
@patch('lms.djangoapps.instructor.views.api.lms.djangoapps.instructor_task.api.submit_cohort_students')
@patch('lms.djangoapps.instructor.views.api.store_uploaded_file')
def test_success_carriage_return_line_feed(self, mock_store_upload, mock_cohort_task):
"""
Verify that we store the input CSV and call the cohorting background
task when lines in the CSV are delimited by carriage returns and line
feeds.
"""
self.verify_success_on_file_content(
'username,email,cohort\r\nfoo_username,bar_email,baz_cohort', mock_store_upload, mock_cohort_task
)
|
pepeportela/edx-platform
|
lms/djangoapps/instructor/tests/test_api.py
|
Python
|
agpl-3.0
| 227,176
|
[
"VisIt"
] |
a05ce79b0837199f6295b5e9aeca8e75b2396f7dbfda91b1db9e37af567acd05
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: panos_security_rule
short_description: Create security rule policy on PAN-OS devices or Panorama management console.
description:
- Security policies allow you to enforce rules and take action, and can be as general or specific as needed.
The policy rules are compared against the incoming traffic in sequence, and because the first rule that matches the traffic is applied,
the more specific rules must precede the more general ones.
author: "Ivan Bojer (@ivanbojer), Robert Hagen (@rnh556)"
version_added: "2.4"
requirements:
- pan-python can be obtained from PyPI U(https://pypi.org/project/pan-python/)
- pandevice can be obtained from PyPI U(https://pypi.org/project/pandevice/)
- xmltodict can be obtained from PyPI U(https://pypi.org/project/xmltodict/)
deprecated:
alternative: Use U(https://galaxy.ansible.com/PaloAltoNetworks/paloaltonetworks) instead.
removed_in: "2.12"
why: Consolidating code base.
notes:
- Checkmode is not supported.
- Panorama is supported.
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device being configured.
required: true
username:
description:
- Username credentials to use for auth unless I(api_key) is set.
default: "admin"
password:
description:
- Password credentials to use for auth unless I(api_key) is set.
required: true
api_key:
description:
- API key that can be used instead of I(username)/I(password) credentials.
operation:
description:
- The action to be taken. Supported values are I(add)/I(update)/I(find)/I(delete).
default: 'add'
choices:
- add
- update
- delete
- find
category:
description:
- The category.
type: list
default: ['any']
rule_name:
description:
- Name of the security rule.
required: true
rule_type:
description:
- Type of security rule (version 6.1 of PanOS and above).
default: "universal"
description:
description:
- Description for the security rule.
tag_name:
description:
- Administrative tags that can be added to the rule. Note, tags must be already defined.
source_zone:
description:
- List of source zones.
default: "any"
destination_zone:
description:
- List of destination zones.
default: "any"
source_ip:
description:
- List of source addresses.
default: "any"
source_user:
description:
- Use users to enforce policy for individual users or a group of users.
default: "any"
hip_profiles:
description: >
- If you are using GlobalProtect with host information profile (HIP) enabled, you can also base the policy
on information collected by GlobalProtect. For example, the user access level can be determined by the HIP that
notifies the firewall about the user's local configuration.
default: "any"
destination_ip:
description:
- List of destination addresses.
default: "any"
application:
description:
- List of applications.
default: "any"
service:
description:
- List of services.
default: "application-default"
log_start:
description:
- Whether to log at session start.
type: bool
log_end:
description:
- Whether to log at session end.
default: true
type: bool
action:
description:
- Action to apply once the rule matches.
default: "allow"
group_profile:
description: >
- Security profile group that is already defined in the system. This property supersedes antivirus,
vulnerability, spyware, url_filtering, file_blocking, data_filtering, and wildfire_analysis properties.
antivirus:
description:
- Name of the already defined antivirus profile.
vulnerability:
description:
- Name of the already defined vulnerability profile.
spyware:
description:
- Name of the already defined spyware profile.
url_filtering:
description:
- Name of the already defined url_filtering profile.
file_blocking:
description:
- Name of the already defined file_blocking profile.
data_filtering:
description:
- Name of the already defined data_filtering profile.
wildfire_analysis:
description:
- Name of the already defined wildfire_analysis profile.
devicegroup:
description: >
- Device groups are used for the Panorama interaction with Firewall(s). The group must exist on Panorama.
If the device group is not defined we assume that we are contacting a Firewall.
commit:
description:
- Commit configuration if changed.
type: bool
default: 'yes'
'''
EXAMPLES = '''
- name: add an SSH inbound rule to devicegroup
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'SSH permit'
description: 'SSH rule test'
tag_name: ['ProjectX']
source_zone: ['public']
destination_zone: ['private']
source_ip: ['any']
source_user: ['any']
destination_ip: ['1.1.1.1']
category: ['any']
application: ['ssh']
service: ['application-default']
hip_profiles: ['any']
action: 'allow'
devicegroup: 'Cloud Edge'
- name: add a rule to allow HTTP multimedia only from CDNs
panos_security_rule:
ip_address: '10.5.172.91'
username: 'admin'
password: 'paloalto'
operation: 'add'
rule_name: 'HTTP Multimedia'
description: 'Allow HTTP multimedia only to host at 1.1.1.1'
source_zone: ['public']
destination_zone: ['private']
source_ip: ['any']
source_user: ['any']
destination_ip: ['1.1.1.1']
category: ['content-delivery-networks']
application: ['http-video', 'http-audio']
service: ['service-http', 'service-https']
hip_profiles: ['any']
action: 'allow'
- name: add a more complex rule that uses security profiles
panos_security_rule:
ip_address: '{{ ip_address }}'
username: '{{ username }}'
password: '{{ password }}'
operation: 'add'
rule_name: 'Allow HTTP w profile'
log_start: false
log_end: true
action: 'allow'
antivirus: 'default'
vulnerability: 'default'
spyware: 'default'
url_filtering: 'default'
wildfire_analysis: 'default'
- name: delete a devicegroup security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
api_key: '{{ api_key }}'
operation: 'delete'
rule_name: 'Allow telnet'
devicegroup: 'DC Firewalls'
- name: find a specific security rule
panos_security_rule:
ip_address: '{{ ip_address }}'
password: '{{ password }}'
operation: 'find'
rule_name: 'Allow RDP to DCs'
register: result
- debug: msg='{{result.stdout_lines}}'
'''
RETURN = '''
# Default return values
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
try:
import pan.xapi
from pan.xapi import PanXapiError
import pandevice
from pandevice import base
from pandevice import firewall
from pandevice import panorama
from pandevice import objects
from pandevice import policies
import xmltodict
import json
HAS_LIB = True
except ImportError:
HAS_LIB = False
def get_devicegroup(device, devicegroup):
dg_list = device.refresh_devices()
for group in dg_list:
if isinstance(group, pandevice.panorama.DeviceGroup):
if group.name == devicegroup:
return group
return False
def get_rulebase(device, devicegroup):
# Build the rulebase
if isinstance(device, pandevice.firewall.Firewall):
rulebase = pandevice.policies.Rulebase()
device.add(rulebase)
elif isinstance(device, pandevice.panorama.Panorama):
dg = panorama.DeviceGroup(devicegroup)
device.add(dg)
rulebase = policies.PreRulebase()
dg.add(rulebase)
else:
return False
policies.SecurityRule.refreshall(rulebase)
return rulebase
def find_rule(rulebase, rule_name):
# Search for the rule name
rule = rulebase.find(rule_name)
if rule:
return rule
else:
return False
def rule_is_match(propose_rule, current_rule):
match_check = ['name', 'description', 'group_profile', 'antivirus', 'vulnerability',
'spyware', 'url_filtering', 'file_blocking', 'data_filtering',
'wildfire_analysis', 'type', 'action', 'tag', 'log_start', 'log_end']
list_check = ['tozone', 'fromzone', 'source', 'source_user', 'destination', 'category',
'application', 'service', 'hip_profiles']
for check in match_check:
propose_check = getattr(propose_rule, check, None)
current_check = getattr(current_rule, check, None)
if propose_check != current_check:
return False
for check in list_check:
propose_check = getattr(propose_rule, check, [])
current_check = getattr(current_rule, check, [])
if set(propose_check) != set(current_check):
return False
return True
def create_security_rule(**kwargs):
security_rule = policies.SecurityRule(
name=kwargs['rule_name'],
description=kwargs['description'],
fromzone=kwargs['source_zone'],
source=kwargs['source_ip'],
source_user=kwargs['source_user'],
hip_profiles=kwargs['hip_profiles'],
tozone=kwargs['destination_zone'],
destination=kwargs['destination_ip'],
application=kwargs['application'],
service=kwargs['service'],
category=kwargs['category'],
log_start=kwargs['log_start'],
log_end=kwargs['log_end'],
action=kwargs['action'],
type=kwargs['rule_type']
)
if 'tag_name' in kwargs:
security_rule.tag = kwargs['tag_name']
# profile settings
if 'group_profile' in kwargs:
security_rule.group = kwargs['group_profile']
else:
if 'antivirus' in kwargs:
security_rule.virus = kwargs['antivirus']
if 'vulnerability' in kwargs:
security_rule.vulnerability = kwargs['vulnerability']
if 'spyware' in kwargs:
security_rule.spyware = kwargs['spyware']
if 'url_filtering' in kwargs:
security_rule.url_filtering = kwargs['url_filtering']
if 'file_blocking' in kwargs:
security_rule.file_blocking = kwargs['file_blocking']
if 'data_filtering' in kwargs:
security_rule.data_filtering = kwargs['data_filtering']
if 'wildfire_analysis' in kwargs:
security_rule.wildfire_analysis = kwargs['wildfire_analysis']
return security_rule
def add_rule(rulebase, sec_rule):
if rulebase:
rulebase.add(sec_rule)
sec_rule.create()
return True
else:
return False
def update_rule(rulebase, nat_rule):
if rulebase:
rulebase.add(nat_rule)
nat_rule.apply()
return True
else:
return False
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(no_log=True),
username=dict(default='admin'),
api_key=dict(no_log=True),
operation=dict(default='add', choices=['add', 'update', 'delete', 'find']),
rule_name=dict(required=True),
description=dict(default=''),
tag_name=dict(type='list'),
destination_zone=dict(type='list', default=['any']),
source_zone=dict(type='list', default=['any']),
source_ip=dict(type='list', default=["any"]),
source_user=dict(type='list', default=['any']),
destination_ip=dict(type='list', default=["any"]),
category=dict(type='list', default=['any']),
application=dict(type='list', default=['any']),
service=dict(type='list', default=['application-default']),
hip_profiles=dict(type='list', default=['any']),
group_profile=dict(),
antivirus=dict(),
vulnerability=dict(),
spyware=dict(),
url_filtering=dict(),
file_blocking=dict(),
data_filtering=dict(),
wildfire_analysis=dict(),
log_start=dict(type='bool', default=False),
log_end=dict(type='bool', default=True),
rule_type=dict(default='universal'),
action=dict(default='allow'),
devicegroup=dict(),
commit=dict(type='bool', default=True)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False,
required_one_of=[['api_key', 'password']])
if not HAS_LIB:
module.fail_json(msg='Missing required libraries.')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
api_key = module.params['api_key']
operation = module.params['operation']
rule_name = module.params['rule_name']
description = module.params['description']
tag_name = module.params['tag_name']
source_zone = module.params['source_zone']
source_ip = module.params['source_ip']
source_user = module.params['source_user']
hip_profiles = module.params['hip_profiles']
destination_zone = module.params['destination_zone']
destination_ip = module.params['destination_ip']
application = module.params['application']
service = module.params['service']
category = module.params['category']
log_start = module.params['log_start']
log_end = module.params['log_end']
action = module.params['action']
group_profile = module.params['group_profile']
antivirus = module.params['antivirus']
vulnerability = module.params['vulnerability']
spyware = module.params['spyware']
url_filtering = module.params['url_filtering']
file_blocking = module.params['file_blocking']
data_filtering = module.params['data_filtering']
wildfire_analysis = module.params['wildfire_analysis']
rule_type = module.params['rule_type']
devicegroup = module.params['devicegroup']
commit = module.params['commit']
# Create the device with the appropriate pandevice type
device = base.PanDevice.create_from_device(ip_address, username, password, api_key=api_key)
# If Panorama, validate the devicegroup
dev_group = None
if devicegroup and isinstance(device, panorama.Panorama):
dev_group = get_devicegroup(device, devicegroup)
if dev_group:
device.add(dev_group)
else:
module.fail_json(msg='\'%s\' device group not found in Panorama. Is the name correct?' % devicegroup)
# Get the rulebase
rulebase = get_rulebase(device, dev_group)
# Which action shall we take on the object?
if operation == "find":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, format and return the result
if match:
match_dict = xmltodict.parse(match.element_str())
module.exit_json(
stdout_lines=json.dumps(match_dict, indent=2),
msg='Rule matched'
)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "delete":
# Search for the object
match = find_rule(rulebase, rule_name)
# If found, delete it
if match:
try:
if commit:
match.delete()
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=True, msg='Rule \'%s\' successfully deleted' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' not found. Is the name correct?' % rule_name)
elif operation == "add":
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
# Search for the rule. Fail if found.
match = find_rule(rulebase, rule_name)
if match:
if rule_is_match(match, new_rule):
module.exit_json(changed=False, msg='Rule \'%s\' is already in place' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' already exists. Use operation: \'update\' to change it.' % rule_name)
else:
try:
changed = add_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=changed, msg='Rule \'%s\' successfully added' % rule_name)
elif operation == 'update':
# Search for the rule. Update if found.
match = find_rule(rulebase, rule_name)
if match:
try:
new_rule = create_security_rule(
rule_name=rule_name,
description=description,
tag_name=tag_name,
source_zone=source_zone,
destination_zone=destination_zone,
source_ip=source_ip,
source_user=source_user,
destination_ip=destination_ip,
category=category,
application=application,
service=service,
hip_profiles=hip_profiles,
group_profile=group_profile,
antivirus=antivirus,
vulnerability=vulnerability,
spyware=spyware,
url_filtering=url_filtering,
file_blocking=file_blocking,
data_filtering=data_filtering,
wildfire_analysis=wildfire_analysis,
log_start=log_start,
log_end=log_end,
rule_type=rule_type,
action=action
)
changed = update_rule(rulebase, new_rule)
if changed and commit:
device.commit(sync=True)
except PanXapiError as exc:
module.fail_json(msg=to_native(exc))
module.exit_json(changed=changed, msg='Rule \'%s\' successfully updated' % rule_name)
else:
module.fail_json(msg='Rule \'%s\' does not exist. Use operation: \'add\' to add it.' % rule_name)
if __name__ == '__main__':
main()
|
alxgu/ansible
|
lib/ansible/modules/network/panos/_panos_security_rule.py
|
Python
|
gpl-3.0
| 20,230
|
[
"Galaxy"
] |
5a93d6cde5714403e8cc0c031e7b66f0139cb13638935fff47a08afd801b10ba
|
#!/usr/bin/env python
"""
Get information for a given production
Example:
$ dirac-prod-get 381
"""
import DIRAC
from DIRAC.Core.Base.Script import Script
@Script()
def main():
# Registering arguments will automatically add their description to the help menu
Script.registerArgument("prodID: Production ID")
_, args = Script.parseCommandLine()
from DIRAC.Core.Utilities.PrettyPrint import printTable
from DIRAC.ProductionSystem.Client.ProductionClient import ProductionClient
prodClient = ProductionClient()
# get arguments
prodID = args[0]
res = prodClient.getProduction(prodID)
fields = ["ProductionName", "Status", "ProductionID", "CreationDate", "LastUpdate", "AuthorDN", "AuthorGroup"]
records = []
if res["OK"]:
prodList = res["Value"]
if not isinstance(res["Value"], list):
prodList = [res["Value"]]
for prod in prodList:
records.append(
[
str(prod["ProductionName"]),
str(prod["Status"]),
str(prod["ProductionID"]),
str(prod["CreationDate"]),
str(prod["LastUpdate"]),
str(prod["AuthorDN"]),
str(prod["AuthorGroup"]),
]
)
else:
DIRAC.gLogger.error(res["Message"])
DIRAC.exit(-1)
printTable(fields, records)
DIRAC.exit(0)
if __name__ == "__main__":
main()
|
DIRACGrid/DIRAC
|
src/DIRAC/ProductionSystem/scripts/dirac_prod_get.py
|
Python
|
gpl-3.0
| 1,488
|
[
"DIRAC"
] |
1ff02858af1b9cceb1d05bb0e546200df4b5bb565d0c2d43e918775378de4936
|
# Copyright (c) Charl P. Botha, TU Delft
# All rights reserved.
# See COPYRIGHT for details.
# TODO:
# * this module is not sensitive to changes in its inputs... it should
# register observers and run _createPipelines if/when they change.
from imageStackRDR import imageStackClass
from module_base import ModuleBase
from module_mixins import NoConfigModuleMixin
import fixitk as itk
from typeModules.transformStackClass import transformStackClass
from typeModules.imageStackClass import imageStackClass
import vtk
import ConnectVTKITKPython as CVIPy
class transform2D(NoConfigModuleMixin, ModuleBase):
"""This apply a stack of transforms to a stack of images in an
accumulative fashion, i.e. imageN is transformed:
Tn(Tn-1(...(T1(imageN))).
The result of this filter is a
vtkImageData, ready for using in your friendly neighbourhood
visualisation pipeline.
NOTE: this module was currently kludged to transform 1:N images (and not
0:N). 11/11/2004 (joris): kludge removed.
"""
def __init__(self, module_manager):
ModuleBase.__init__(self, module_manager)
NoConfigModuleMixin.__init__(self)
self._imageStack = None
self._transformStack = None
#
self._itkExporterStack = []
self._imageAppend = vtk.vtkImageAppend()
# stack of images should become volume
self._imageAppend.SetAppendAxis(2)
self._viewFrame = self._createViewFrame(
{'Module (self)' : self})
self.config_to_logic()
self.logic_to_config()
self.config_to_view()
def close(self):
# just in case
self.set_input(0, None)
self.set_input(1, None)
# take care of our refs so that things can disappear
self._destroyPipelines()
del self._itkExporterStack
del self._imageAppend
NoConfigModuleMixin.close(self)
ModuleBase.close(self)
def get_input_descriptions(self):
return ('ITK Image Stack', '2D Transform Stack')
def set_input(self, idx, inputStream):
if idx == 0:
if inputStream != self._imageStack:
# if it's None, we have to take it
if inputStream == None:
# disconnect
self._imageStack = None
self._destroyPipelines()
return
# let's setup for a new stack!
try:
assert(inputStream.__class__.__name__ == 'imageStackClass')
inputStream.Update()
assert(len(inputStream) >= 2)
except Exception:
# if the Update call doesn't work or
# if the input list is not long enough (or unsizable),
# we don't do anything
raise TypeError, \
"register2D requires an ITK Image Stack of minimum length 2 as input."
# now check that the imageStack is the same size as the
# transformStack
if self._transformStack and \
len(inputStream) != len(self._transformStack):
raise TypeError, \
"The Image Stack you are trying to connect has a\n" \
"different length than the connected Transform\n" \
"Stack."
self._imageStack = inputStream
self._createPipelines()
else: # closes if idx == 0 block
if inputStream != self._transformStack:
if inputStream == None:
self._transformStack = None
self._destroyPipelines()
return
try:
assert(inputStream.__class__.__name__ == \
'transformStackClass')
except Exception:
raise TypeError, \
"register2D requires an ITK Transform Stack on " \
"this port."
inputStream.Update()
if len(inputStream) < 2:
raise TypeError, \
"The input transform stack should be of minimum " \
"length 2."
if self._imageStack and \
len(inputStream) != len(self._imageStack):
raise TypeError, \
"The Transform Stack you are trying to connect\n" \
"has a different length than the connected\n" \
"Transform Stack"
self._transformStack = inputStream
self._createPipelines()
# closes else
def get_output_descriptions(self):
return ('vtkImageData',)
def get_output(self, idx):
return self._imageAppend.GetOutput()
def execute_module(self):
pass
def logic_to_config(self):
pass
def config_to_logic(self):
pass
def view_to_config(self):
pass
def config_to_view(self):
pass
# ----------------------------------------------------------------------
# non-API methods start here -------------------------------------------
# ----------------------------------------------------------------------
def _createPipelines(self):
"""Setup all necessary logic to transform, combine and convert all
input images.
Call this ONLY if things have changed, i.e. when
your change observer is called or if the transform2D input ports
are changed.
"""
if not self._imageStack or not self._transformStack:
self._destroyPipelines()
# in this case, we should break down the pipeline
return
# take care of all inputs
self._imageAppend.RemoveAllInputs()
#totalTrfm = itk.itkEuler2DTransform_New()
totalTrfm = itk.itkCenteredRigid2DTransform_New()
totalTrfm.SetIdentity()
prevImage = self._imageStack[0]
for trfm, img, i in zip(self._transformStack,
self._imageStack,
range(len(self._imageStack))):
# accumulate with our totalTransform
totalTrfm.Compose(trfm.GetPointer(), 0)
# make a copy of the totalTransform that we can use on
# THIS image
# copyTotalTrfm = itk.itkEuler2DTransform_New()
copyTotalTrfm = itk.itkCenteredRigid2DTransform_New()
# this is a really kludgy way to copy the total transform,
# as concatenation doesn't update the Parameters member, so
# getting and setting parameters is not the way to go
copyTotalTrfm.SetIdentity()
copyTotalTrfm.Compose(totalTrfm.GetPointer(),0)
# this SHOULD have worked
#pda = totalTrfm.GetParameters()
#copyTotalTrfm.SetParameters(pda)
# this actually increases the ref count of the transform!
# resampler
resampler = itk.itkResampleImageFilterF2F2_New()
resampler.SetTransform(copyTotalTrfm.GetPointer())
resampler.SetInput(img)
region = prevImage.GetLargestPossibleRegion()
resampler.SetSize(region.GetSize())
resampler.SetOutputSpacing(prevImage.GetSpacing())
resampler.SetOutputOrigin(prevImage.GetOrigin())
resampler.SetDefaultPixelValue(0)
# set up the intensity rescaler (float -> unsigned short, 0..65535)
rescaler = itk.itkRescaleIntensityImageFilterF2US2_New()
rescaler.SetOutputMinimum(0)
rescaler.SetOutputMaximum(65535)
rescaler.SetInput(resampler.GetOutput())
print "Resampling image %d" % (i,)
rescaler.Update() # give ITK a chance to complain
itkExporter = itk.itkVTKImageExportUS2_New()
itkExporter.SetInput(rescaler.GetOutput())
# this is so the ref keeps hanging around
self._itkExporterStack.append(itkExporter)
vtkImporter = vtk.vtkImageImport()
CVIPy.ConnectITKUS2ToVTK(itkExporter.GetPointer(),
vtkImporter)
# the image-0 kludge mentioned in the docstring has been removed, so every
# transformed image is appended to the output volume
self._imageAppend.AddInput(vtkImporter.GetOutput())
# setup the previous Image for the next loop
prevImage = img
# things should now work *cough*
def _destroyPipelines(self):
if not self._imageStack or not self._transformStack:
self._imageAppend.RemoveAllInputs()
del self._itkExporterStack[:]
|
chrisidefix/devide
|
modules/insight/transform2D.py
|
Python
|
bsd-3-clause
| 8,857
|
[
"VTK"
] |
e16a73014adb524dd40284243a2e8b4051a46d3deac1ddf84d715a6219b21d1c
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from ansible.compat.six import string_types
from ansible.compat.six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained). If
the input data to this function came from an untrusted (remote) source,
it should first be run through _clean_data_struct() to ensure the data
is further sanitized prior to evaluation.
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# define certain JSON types
# eg. JSON booleans are unknown to python eval()
JSON_TYPES = {
'false': False,
'null': None,
'true': True,
}
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.USub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if sys.version_info[:2] >= (2, 7):
SAFE_NODES.update(
set(
(ast.Set,)
)
)
# And in Python 3.4 too
if sys.version_info[:2] >= (3, 4):
SAFE_NODES.update(
set(
(ast.NameConstant,)
)
)
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, JSON_TYPES, dict(locals))
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is to support late evaluation
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
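
# A brief, hedged demonstration of the whitelist idea documented in safe_eval()
# above: parse the expression, walk its AST, and reject any node type outside an
# allowed set before compiling and evaluating it.  This standalone sketch skips
# Ansible's filter/test plugin lookups and is for illustration only.
if __name__ == '__main__':
    import ast as _ast

    def _tiny_safe_eval(expr):
        allowed = [_ast.Expression, _ast.Load, _ast.List, _ast.Tuple, _ast.Dict,
                   _ast.UnaryOp, _ast.USub, _ast.BinOp,
                   _ast.Add, _ast.Sub, _ast.Mult, _ast.Div]
        # keep the allowed set working across interpreter versions
        for name in ('Num', 'Str', 'Constant', 'NameConstant'):
            if hasattr(_ast, name):
                allowed.append(getattr(_ast, name))
        tree = _ast.parse(expr, mode='eval')
        for node in _ast.walk(tree):
            if not isinstance(node, tuple(allowed)):
                raise ValueError("disallowed node: %s" % type(node).__name__)
        return eval(compile(tree, '<expr>', 'eval'), {'__builtins__': {}}, {})

    print(_tiny_safe_eval('[1, 2, 3 * 4]'))        # -> [1, 2, 12]
    # _tiny_safe_eval('__import__("os")')          # would raise ValueError (Call/Name not allowed)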
|
jordiclariana/ansible
|
lib/ansible/template/safe_eval.py
|
Python
|
gpl-3.0
| 4,626
|
[
"VisIt"
] |
f31482f7e60021e0e6aaa28f2a6fc97b4047e0fba27f0cd50d91cf17ab18d018
|
from __future__ import unicode_literals
import datetime
import json
import pytz
import uuid
from calendar import timegm
from hashlib import sha1
from urlparse import urlparse
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import FakePayload
from django.utils.encoding import force_str
from pycon.models import PyConTalkProposal, ThunderdomeGroup
from pycon.tests.factories import PyConTalkProposalFactory, ThunderdomeGroupFactory, \
ProposalResultFactory
from .models import APIAuth, ProposalData, IRCLogLine
from .decorators import DATETIME_FORMAT
from symposion.schedule.models import Presentation
from symposion.schedule.tests.factories import PresentationFactory
class RawDataClientMixin(object):
"""Mix this into a TestCase class to be able to post raw data through
the test client and use API keys (put at self.auth_key).
"""
def post_raw_data(self, path, post_data):
"""
The built-in test client's post() method assumes the data you pass
is a dictionary and encodes it. If we just want to pass the data
unmodified, we need our own version of post().
"""
parsed = urlparse(path)
r = {
'CONTENT_LENGTH': len(post_data),
'CONTENT_TYPE': "text/plain",
'PATH_INFO': self.client._get_path(parsed),
'QUERY_STRING': force_str(parsed[4]),
'REQUEST_METHOD': str('POST'),
'wsgi.input': FakePayload(post_data),
}
# Add the request signature to the headers being sent.
r.update(self.get_signature(path, method='POST', body=post_data))
# Make the actual request.
return self.client.request(**r)
def get_signature(self, uri, method='GET', body=''):
"""Return a dictionary with the API key and API get_signature
to be sent for the given request."""
# What time is it now?
timestamp = timegm(datetime.datetime.now(tz=pytz.UTC).timetuple())
# Calculate the base string to use for the signature.
base_string = unicode(''.join((
self.auth_key.secret,
unicode(timestamp),
method.upper(),
uri,
body,
))).encode('utf-8')
# Return a dictionary with the headers to send.
return {
'HTTP_X_API_KEY': self.auth_key.auth_key,
'HTTP_X_API_SIGNATURE': sha1(base_string).hexdigest(),
'HTTP_X_API_TIMESTAMP': timestamp,
}
class ThunderdomeGroupListApiTest(RawDataClientMixin, TestCase):
def setUp(self):
self.auth_key = APIAuth.objects.create(name="test")
self.url = reverse('thunderdome_groups')
def test_get_some(self):
ThunderdomeGroupFactory(label='curly', code='3')
ThunderdomeGroupFactory(label='larry', code='2')
ThunderdomeGroupFactory(label='moe', code='1')
rsp = self.client.get(self.url, **self.get_signature(self.url))
self.assertEqual(200, rsp.status_code)
data = json.loads(rsp.content)
groups = data['data']
# We got the 3 groups, in order by code
self.assertEqual(groups[0]['label'], 'moe')
self.assertEqual(groups[1]['label'], 'larry')
self.assertEqual(groups[2]['label'], 'curly')
def test_get_undecided(self):
ThunderdomeGroupFactory(label='curly', code='3')
ThunderdomeGroupFactory(label='larry', code='2', decided=True)
ThunderdomeGroupFactory(label='moe', code='1', decided=True)
url = self.url + "?undecided=1"
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(200, rsp.status_code)
data = json.loads(rsp.content)
groups = data['data']
self.assertEqual(1, len(groups))
self.assertEqual('curly', groups[0]['label'])
class ThunderdomeGroupAddApiTest(RawDataClientMixin, TestCase):
def setUp(self):
self.auth_key = APIAuth.objects.create(name="test")
self.url = reverse('thunderdome_group_add')
def test_get(self):
# This is post-only
rsp = self.client.get(self.url, **self.get_signature(self.url))
self.assertEqual(405, rsp.status_code)
def test_make_one(self):
data = {
'label': 'My_label',
'code': 'My_code'
}
rsp = self.post_raw_data(self.url, json.dumps(data))
self.assertEqual(201, rsp.status_code)
group = ThunderdomeGroup.objects.get()
self.assertEqual(group.code, 'my-code') # _ changes to - and all lowered
self.assertEqual(group.label, 'My_label')
# Response includes the modified code
code = json.loads(rsp.content)['data']['code']
self.assertEqual(code, group.code)
def test_missing_label(self):
data = {
'code': 'My_code'
}
rsp = self.post_raw_data(self.url, json.dumps(data))
self.assertEqual(400, rsp.status_code)
def test_missing_code(self):
data = {
'label': 'My_label',
}
rsp = self.post_raw_data(self.url, json.dumps(data))
self.assertEqual(400, rsp.status_code)
class ThunderdomeGroupDecideTest(RawDataClientMixin, TestCase):
def setUp(self):
self.group = ThunderdomeGroupFactory(code='fred')
self.auth_key = APIAuth.objects.create(name="test")
self.url = reverse('thunderdome_group_decide', args=(self.group.code,))
self.talk1 = PyConTalkProposalFactory(thunderdome_group=self.group)
self.talk2 = PyConTalkProposalFactory(thunderdome_group=self.group)
ProposalResultFactory(proposal=self.talk1, status="undecided")
ProposalResultFactory(proposal=self.talk2, status="undecided")
def test_get(self):
# This is post-only
rsp = self.client.get(self.url, **self.get_signature(self.url))
self.assertEqual(405, rsp.status_code)
def test_no_such_group(self):
bad_id = self.group.id + 1
url = reverse('thunderdome_group_decide', args=(bad_id,))
rsp = self.post_raw_data(url, '')
self.assertEqual(400, rsp.status_code, rsp.content.decode('utf-8'))
def test_undeciding_a_group(self):
# If no talk statuses are provided, all talk statuses should
# change to standby
data = {}
rsp = self.post_raw_data(self.url, json.dumps(data))
self.assertEqual(202, rsp.status_code, rsp.content.decode('utf-8'))
ThunderdomeGroup.objects.get(id=self.group.id)
def test_not_all_talks(self):
# We only process if all talks in the group have a new status provided
data = {
'talks': [
(self.talk1.id, 'accepted'),
]
}
rsp = self.post_raw_data(self.url, json.dumps(data))
self.assertEqual(400, rsp.status_code, rsp.content.decode('utf-8'))
def test_update_talk_statuses(self):
data = {
'talks': [
(self.talk1.id, 'accepted'),
(self.talk2.id, 'rejected')
]
}
self.post_raw_data(self.url, json.dumps(data))
talk1 = PyConTalkProposal.objects.get(id=self.talk1.id)
self.assertEqual('accepted', talk1.result.status)
talk1 = PyConTalkProposal.objects.get(id=self.talk2.id)
self.assertEqual('rejected', talk1.result.status)
class PyConIRCLogsApiTest(TestCase, RawDataClientMixin):
def setUp(self):
self.auth_key = APIAuth.objects.create(name="test")
self.proposal = PyConTalkProposalFactory.create()
def test_get_logs_bad_auth(self):
# Bad auth key
auth_key = uuid.uuid4() # random key
url = reverse('proposal_irc_logs',
kwargs={'proposal_id': str(self.proposal.id)})
rsp = self.client.get(url, HTTP_X_API_KEY=str(auth_key))
self.assertEqual(403, rsp.status_code)
self.assertEqual(
json.loads(rsp.content)['error'],
'The API Key provided is not valid.',
)
def test_get_logs_disabled_auth(self):
# Auth disabled
self.auth_key.enabled = False
self.auth_key.save()
url = reverse('proposal_irc_logs',
kwargs={'proposal_id': str(self.proposal.id)}
)
rsp = self.client.get(url, HTTP_X_API_KEY=self.auth_key.auth_key)
self.assertEqual(403, rsp.status_code)
self.assertEqual(
json.loads(rsp.content)['error'],
'The API Key provided is not valid.',
)
def test_get_logs_no_data(self):
# No logs for that proposal
url = reverse('proposal_irc_logs', kwargs={
'proposal_id': str(self.proposal.id),
})
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(200, rsp.status_code, rsp.content)
logs = json.loads(rsp.content)['data']
self.assertEqual([], logs)
def test_get_logs_bad_proposal(self):
# Proposal does not exist
self.proposal.delete()
url = reverse('proposal_irc_logs', kwargs={
'proposal_id': str(self.proposal.id),
})
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(404, rsp.status_code)
def test_get_logs_data(self):
# Get a couple of lines
# Create the lines we'll get
LINE1 = "Now is the time for all good folks to dance."
LINE2 = "A completely different log line"
USER1 = "Jim Bob"
USER2 = "George Washington"
now = datetime.datetime.now()
# make sure they have different timestamps, and that microseconds
# are preserved
then = now + datetime.timedelta(microseconds=1)
IRCLogLine.objects.create(proposal=self.proposal, line=LINE1,
user=USER1,
timestamp=now.strftime(DATETIME_FORMAT))
IRCLogLine.objects.create(proposal=self.proposal, line=LINE2,
user=USER2,
timestamp=then.strftime(DATETIME_FORMAT))
# Create another proposal and a line to make sure we
# don't get it in the results
self.proposal2 = PyConTalkProposalFactory.create()
later = then + datetime.timedelta(seconds=2)
IRCLogLine.objects.create(proposal=self.proposal2, line="wrong",
user="wrong",
timestamp=later.strftime(DATETIME_FORMAT))
url = reverse('proposal_irc_logs', kwargs={
'proposal_id': str(self.proposal.id),
})
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(200, rsp.status_code)
logs = json.loads(rsp.content)['data']
self.assertEqual(2, len(logs))
# They should come out in timestamp order. Data, including time
# to the microsecond, should be preserved.
self.assertEqual(LINE1, logs[0]['line'])
self.assertEqual(USER1, logs[0]['user'])
self.assertEqual(now.strftime(DATETIME_FORMAT), logs[0]['timestamp'])
self.assertEqual(LINE2, logs[1]['line'])
self.assertEqual(then.strftime(DATETIME_FORMAT), logs[1]['timestamp'])
self.assertEqual(USER2, logs[1]['user'])
def test_set_data(self):
# We can set data and it ends up in the database
url = reverse('proposal_irc_logs', kwargs={
'proposal_id': str(self.proposal.id)
})
now = datetime.datetime.now()
now_formatted = now.strftime(DATETIME_FORMAT)
LINE = "Now is the time for all good folks to dance."
USER = "Jim Bob"
logs = [
{
'timestamp': now_formatted,
'line': LINE,
'user': USER,
}
]
json_data = json.dumps(logs)
rsp = self.post_raw_data(url, post_data=json_data)
self.assertEqual(201, rsp.status_code, rsp.content)
# Should only be one log entry
log = IRCLogLine.objects.get()
self.assertEqual(self.proposal.id, log.proposal_id)
self.assertEqual(LINE, log.line)
self.assertEqual(now, log.timestamp)
self.assertEqual(USER, log.user)
def test_set_data_bad_proposal(self):
# proposal does not exist
url = reverse('proposal_irc_logs', kwargs={
'proposal_id': 999,
})
now = datetime.datetime.now()
now_formatted = now.strftime(DATETIME_FORMAT)
LINE = "Now is the time for all good folks to dance."
USER = "Jim Bob"
logs = [
{
'timestamp': now_formatted,
'line': LINE,
'user': USER,
}
]
json_data = json.dumps(logs)
rsp = self.post_raw_data(url, post_data=json_data)
self.assertEqual(404, rsp.status_code)
class PyConProposalDataApiTest(TestCase, RawDataClientMixin):
def setUp(self):
self.auth_key = APIAuth.objects.create(name="test")
self.proposal = PyConTalkProposalFactory.create()
def test_get_data_bad_auth(self):
self.auth_key.secret = uuid.uuid4()
# If proposal has no data, we get back an empty string.
url = reverse('proposal_detail', kwargs={
'proposal_id': self.proposal.id,
})
rsp = self.client.get(url)
self.assertEqual(403, rsp.status_code)
self.assertEqual(
json.loads(rsp.content)['error'],
'API Key not provided.',
)
def test_get_data_disabled_auth(self):
self.auth_key.enabled = False
self.auth_key.save()
url = reverse('proposal_detail', kwargs={
'proposal_id': self.proposal.id,
})
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(403, rsp.status_code)
self.assertEqual(
json.loads(rsp.content)['error'],
'The API Key provided is not valid.',
)
def test_get_data(self):
# If proposal has data, we get it.
TEST_DATA = 'now is the time for all good people...'
ProposalData.objects.create(proposal=self.proposal,
data=json.dumps(TEST_DATA)),
url = reverse('proposal_detail', kwargs={
'proposal_id': self.proposal.id,
})
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(200, rsp.status_code, rsp.content)
self.assertEqual(TEST_DATA, json.loads(rsp.content)['data']['extra'])
def test_set_data(self):
# We can set data and it ends up in the database
url = reverse('proposal_detail', kwargs={
'proposal_id': self.proposal.id,
})
TEST_DATA = {'stuff': 'Foo! Bar! Sis boom bah!'}
rsp = self.post_raw_data(url, post_data=json.dumps(TEST_DATA))
self.assertEqual(202, rsp.status_code, rsp.content)
proposal = PyConTalkProposal.objects.get(id=self.proposal.id)
self.assertEqual(TEST_DATA, json.loads(proposal.data.data))
def test_replace_data(self):
# If data already exists, a set replaces it
TEST_DATA = {'stuff': 'now is the time for all good people...'}
ProposalData.objects.create(proposal=self.proposal,
data=TEST_DATA)
url = reverse('proposal_detail', kwargs={
'proposal_id': self.proposal.id,
})
TEST_DATA = {'stuff': 'Foo! Bar! Sis boom bah!'}
rsp = self.post_raw_data(url, post_data=json.dumps(TEST_DATA))
self.assertEqual(202, rsp.status_code, rsp.content)
proposal = PyConTalkProposal.objects.get(id=self.proposal.id)
self.assertEqual(TEST_DATA, json.loads(proposal.data.data))
def test_round_trip(self):
# We can set data using the API, and get it back using the API
url = reverse('proposal_detail', kwargs={
'proposal_id': self.proposal.id,
})
TEST_DATA = {'stuff': 'Foo! Bar! Sis boom bah!'}
rsp = self.post_raw_data(url, post_data=json.dumps(TEST_DATA))
self.assertEqual(202, rsp.status_code, rsp.content)
# Now establish that we can get it back.
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(200, rsp.status_code)
self.assertEqual(TEST_DATA, json.loads(rsp.content)['data']['extra'])
def test_get_no_proposal(self):
# If there's no such proposal, we get back a 404
url = reverse('proposal_detail', kwargs={
'proposal_id': str(self.proposal.id) + "0099",
})
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(404, rsp.status_code)
def test_get_bad_auth(self):
# Bad auth key fails
bad_auth_key = uuid.uuid4() # another random key, it will not match
url = reverse('proposal_detail', kwargs={
'proposal_id': self.proposal.id,
})
rsp = self.client.get(url, HTTP_X_API_KEY=str(bad_auth_key))
self.assertEqual(403, rsp.status_code)
def test_list_view(self):
url = reverse('proposal_list')
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(rsp.status_code, 200, rsp.content)
self.assertEqual(len(json.loads(rsp.content)['data']), 1)
def test_list_view_talks_only(self):
url = reverse('proposal_list') + '?type=talk'
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(rsp.status_code, 200, rsp.content)
self.assertEqual(len(json.loads(rsp.content)['data']), 1)
def test_list_view_tutorials_only(self):
url = reverse('proposal_list') + '?type=tutorial'
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(rsp.status_code, 200, rsp.content)
self.assertEqual(len(json.loads(rsp.content)['data']), 0)
def test_list_view_undecided_only(self):
url = reverse('proposal_list') + '?status=undecided'
rsp = self.client.get(url, **self.get_signature(url))
self.assertEqual(rsp.status_code, 200, rsp.content)
self.assertEqual(len(json.loads(rsp.content)['data']), 1)
class SetPresentationURLsTest(RawDataClientMixin, TestCase):
def setUp(self):
self.auth_key = APIAuth.objects.create(name="test")
self.presentation = PresentationFactory(
video_url='http://video.example.com',
assets_url='http://assets.example.com',
slides_url='http://slides.example.com',
)
self.url = reverse('set_talk_urls', args=[self.presentation.slot.pk])
def test_invalid_request_data(self):
TEST_DATA = {'stuff': 'Foo! Bar! Sis boom bah!'}
rsp = self.post_raw_data(self.url, post_data=json.dumps(TEST_DATA))
self.assertEqual(400, rsp.status_code, rsp.content)
response_data = json.loads(rsp.content)
self.assertEqual({'code': 400,
'data': {
'error':
'Must provide at least one of video_url, slides_url, '
'and assets_url.'}},
response_data)
def test_change_assets_url(self):
# A valid request
TEST_DATA = {'assets_url': 'http://example.com'}
rsp = self.post_raw_data(self.url, post_data=json.dumps(TEST_DATA))
self.assertEqual(202, rsp.status_code, rsp.content)
presentation = Presentation.objects.get(pk=self.presentation.pk)
self.assertEqual(presentation.assets_url, TEST_DATA['assets_url'])
self.assertEqual(presentation.video_url, self.presentation.video_url)
self.assertEqual(presentation.slides_url, self.presentation.slides_url)
response_data = json.loads(rsp.content)
self.assertEqual({'code': 202, 'data': {'message': 'Talk updated.'}},
response_data)
def test_change_all_urls(self):
# A valid request
TEST_DATA = {
'assets_url': 'http://example.com',
'video_url': 'https://v.example.com',
'slides_url': 'http://superslide.toys'
}
rsp = self.post_raw_data(self.url, post_data=json.dumps(TEST_DATA))
self.assertEqual(202, rsp.status_code, rsp.content)
presentation = Presentation.objects.get(pk=self.presentation.pk)
self.assertEqual(presentation.assets_url, TEST_DATA['assets_url'])
self.assertEqual(presentation.video_url, TEST_DATA['video_url'])
self.assertEqual(presentation.slides_url, TEST_DATA['slides_url'])
response_data = json.loads(rsp.content)
self.assertEqual({'code': 202, 'data': {'message': 'Talk updated.'}},
response_data)
def test_invalid_url(self):
TEST_DATA = {'video_url': 'Foo! Bar! Sis boom bah!'}
rsp = self.post_raw_data(self.url, post_data=json.dumps(TEST_DATA))
self.assertEqual(400, rsp.status_code, rsp.content)
response_data = json.loads(rsp.content)
self.assertEqual({'code': 400, 'data': {'error': {'video_url': ['Enter a valid URL.']}}},
response_data)
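
# The headers assembled by RawDataClientMixin.get_signature() above reduce to a
# SHA-1 digest over secret + timestamp + method + uri + body.  The sketch below
# shows that computation on its own; the key, secret and uri values are
# placeholders and the server-side verification is not reproduced here.
if __name__ == '__main__':
    from calendar import timegm as _timegm
    from hashlib import sha1 as _sha1
    import datetime as _datetime
    import pytz as _pytz

    def example_api_headers(auth_key, secret, uri, method='GET', body=''):
        # timestamp is seconds since the epoch, UTC, as used by get_signature()
        timestamp = _timegm(_datetime.datetime.now(tz=_pytz.UTC).timetuple())
        base_string = ''.join((secret, str(timestamp), method.upper(), uri, body))
        return {
            'HTTP_X_API_KEY': auth_key,
            'HTTP_X_API_SIGNATURE': _sha1(base_string.encode('utf-8')).hexdigest(),
            'HTTP_X_API_TIMESTAMP': timestamp,
        }

    print(example_api_headers('example-key', 'example-secret', '/pycon_api/proposals/'))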
|
njl/pycon
|
pycon/pycon_api/tests.py
|
Python
|
bsd-3-clause
| 21,334
|
[
"MOE"
] |
d9f1a46c1cd2df522bf44f2a5481ea207524a5c8feae6e3f7f0286c848016ceb
|
from __future__ import (absolute_import, division, print_function)
try:
import pathos.multiprocessing as mp
PATHOS_FOUND = True
except ImportError:
PATHOS_FOUND = False
import numpy as np
import six
import os
from mantid.api import AlgorithmFactory, FileAction, FileProperty, PythonAlgorithm, Progress, WorkspaceProperty, mtd
from mantid.api import WorkspaceFactory, AnalysisDataService
# noinspection PyProtectedMember
from mantid.api._api import WorkspaceGroup
from mantid.simpleapi import CloneWorkspace, GroupWorkspaces, SaveAscii, Load
from mantid.kernel import logger, StringListValidator, Direction, StringArrayProperty, Atom
import AbinsModules
# noinspection PyPep8Naming,PyMethodMayBeStatic
class Abins(PythonAlgorithm):
_dft_program = None
_phonon_file = None
_experimental_file = None
_temperature = None
_scale = None
_sample_form = None
_instrument_name = None
_atoms = None
_sum_contributions = None
_scale_by_cross_section = None
_calc_partial = None
_out_ws_name = None
_num_quantum_order_events = None
_extracted_dft_data = None
def category(self):
return "Simulation"
# ----------------------------------------------------------------------------------------
def summary(self):
return "Calculates inelastic neutron scattering."
# ----------------------------------------------------------------------------------------
def PyInit(self):
# Declare all properties
self.declareProperty(name="DFTprogram",
direction=Direction.Input,
defaultValue="CASTEP",
validator=StringListValidator(["CASTEP", "CRYSTAL"]),
doc="DFT program which was used for a phonon calculation.")
self.declareProperty(FileProperty("PhononFile", "",
action=FileAction.Load,
direction=Direction.Input,
extensions=["phonon", "out"]),
doc="File with the data from a phonon calculation.")
self.declareProperty(FileProperty("ExperimentalFile", "",
action=FileAction.OptionalLoad,
direction=Direction.Input,
extensions=["raw", "dat"]),
doc="File with the experimental inelastic spectrum to compare.")
self.declareProperty(name="Temperature",
direction=Direction.Input,
defaultValue=10.0,
doc="Temperature in K for which dynamical structure factor S should be calculated.")
self.declareProperty(name="Scale", defaultValue=1.0,
doc='Scale the intensity by the given factor. Default is no scaling.')
self.declareProperty(name="SampleForm",
direction=Direction.Input,
defaultValue="Powder",
validator=StringListValidator(AbinsModules.AbinsConstants.ALL_SAMPLE_FORMS),
# doc="Form of the sample: SingleCrystal or Powder.")
doc="Form of the sample: Powder.")
self.declareProperty(name="Instrument",
direction=Direction.Input,
defaultValue="TOSCA",
# validator=StringListValidator(AbinsModules.AbinsConstants.ALL_INSTRUMENTS)
validator=StringListValidator(["TOSCA"]),
doc="Name of an instrument for which analysis should be performed.")
self.declareProperty(StringArrayProperty("Atoms", Direction.Input),
doc="List of atoms to use to calculate partial S."
"If left blank, workspaces with S for all types of atoms will be calculated.")
self.declareProperty(name="SumContributions", defaultValue=False,
doc="Sum the partial dynamical structure factors into a single workspace.")
self.declareProperty(name="ScaleByCrossSection", defaultValue='Incoherent',
validator=StringListValidator(['Total', 'Incoherent', 'Coherent']),
doc="Scale the partial dynamical structure factors by the scattering cross section.")
self.declareProperty(name="QuantumOrderEventsNumber", defaultValue='1',
validator=StringListValidator(['1', '2', '3', '4']),
doc="Number of quantum order effects included in the calculation "
"(1 -> FUNDAMENTALS, 2-> first overtone + FUNDAMENTALS + "
"2nd order combinations, 3-> FUNDAMENTALS + first overtone + second overtone + 2nd "
"order combinations + 3rd order combinations etc...)")
self.declareProperty(WorkspaceProperty("OutputWorkspace", '', Direction.Output),
doc="Name to give the output workspace.")
def validateInputs(self):
"""
        Performs input validation. Used to ensure the user has defined a consistent set of parameters.
"""
input_file_validators = {"CASTEP": self._validate_castep_input_file,
"CRYSTAL": self._validate_crystal_input_file}
issues = dict()
temperature = self.getProperty("Temperature").value
if temperature < 0:
issues["Temperature"] = "Temperature must be positive."
scale = self.getProperty("Scale").value
if scale < 0:
issues["Scale"] = "Scale must be positive."
dft_program = self.getProperty("DFTprogram").value
phonon_filename = self.getProperty("PhononFile").value
output = input_file_validators[dft_program](filename_full_path=phonon_filename)
if output["Invalid"]:
issues["PhononFile"] = output["Comment"]
workspace_name = self.getPropertyValue("OutputWorkspace")
# list of special keywords which cannot be used in the name of workspace
forbidden_keywords = ["total"]
if workspace_name in mtd:
issues["OutputWorkspace"] = "Workspace with name " + workspace_name + " already in use; please give " \
"a different name for workspace."
elif workspace_name == "":
issues["OutputWorkspace"] = "Please specify name of workspace."
for word in forbidden_keywords:
if word in workspace_name:
issues["OutputWorkspace"] = "Keyword: " + word + " cannot be used in the name of workspace."
break
self._check_advanced_parameter()
return issues
def PyExec(self):
# 0) Create reporter to report progress
steps = 9
begin = 0
end = 1.0
prog_reporter = Progress(self, begin, end, steps)
# 1) get input parameters from a user
self._get_properties()
prog_reporter.report("Input data from the user has been collected.")
# 2) read DFT data
dft_loaders = {"CASTEP": AbinsModules.LoadCASTEP, "CRYSTAL": AbinsModules.LoadCRYSTAL}
dft_reader = dft_loaders[self._dft_program](input_dft_filename=self._phonon_file)
dft_data = dft_reader.get_formatted_data()
prog_reporter.report("Phonon data has been read.")
# 3) calculate S
s_calculator = AbinsModules.CalculateS.init(filename=self._phonon_file, temperature=self._temperature,
sample_form=self._sample_form, abins_data=dft_data,
instrument=self._instrument,
quantum_order_num=self._num_quantum_order_events)
s_data = s_calculator.get_formatted_data()
prog_reporter.report("Dynamical structure factors have been determined.")
# 4) get atoms for which S should be plotted
self._extracted_dft_data = dft_data.get_atoms_data().extract()
num_atoms = len(self._extracted_dft_data)
all_atms_smbls = list(set([self._extracted_dft_data["atom_%s" % atom]["symbol"] for atom in range(num_atoms)]))
all_atms_smbls.sort()
if len(self._atoms) == 0: # case: all atoms
atoms_symbol = all_atms_smbls
else: # case selected atoms
if len(self._atoms) != len(set(self._atoms)): # only different types
raise ValueError("Not all user defined atoms are unique.")
for atom_symbol in self._atoms:
if atom_symbol not in all_atms_smbls:
raise ValueError("User defined atom not present in the system.")
atoms_symbol = self._atoms
prog_reporter.report("Atoms, for which dynamical structure factors should be plotted, have been determined.")
# at the moment only types of atom, e.g, for benzene three options -> 1) C, H; 2) C; 3) H
# 5) create workspaces for atoms in interest
workspaces = []
if self._sample_form == "Powder":
workspaces.extend(self._create_partial_s_per_type_workspaces(atoms_symbols=atoms_symbol, s_data=s_data))
prog_reporter.report("Workspaces with partial dynamical structure factors have been constructed.")
# 6) Create a workspace with sum of all atoms if required
if self._sum_contributions:
total_atom_workspaces = []
for ws in workspaces:
if "total" in ws:
total_atom_workspaces.append(ws)
total_workspace = self._create_total_workspace(partial_workspaces=total_atom_workspaces)
workspaces.insert(0, total_workspace)
prog_reporter.report("Workspace with total S has been constructed.")
# 7) add experimental data if available to the collection of workspaces
if self._experimental_file != "":
workspaces.insert(0, self._create_experimental_data_workspace().name())
prog_reporter.report("Workspace with the experimental data has been constructed.")
GroupWorkspaces(InputWorkspaces=workspaces, OutputWorkspace=self._out_ws_name)
# 8) save workspaces to ascii_file
num_workspaces = mtd[self._out_ws_name].getNumberOfEntries()
for wrk_num in range(num_workspaces):
wrk = mtd[self._out_ws_name].getItem(wrk_num)
SaveAscii(InputWorkspace=wrk, Filename=wrk.name() + ".dat", Separator="Space", WriteSpectrumID=False)
prog_reporter.report("All workspaces have been saved to ASCII files.")
# 9) set OutputWorkspace
self.setProperty('OutputWorkspace', self._out_ws_name)
prog_reporter.report("Group workspace with all required dynamical structure factors has been constructed.")
def _create_workspaces(self, atoms_symbols=None, s_data=None):
"""
Creates workspaces for all types of atoms. Creates both partial and total workspaces for all types of atoms.
@param atoms_symbols: list of atom types for which S should be created
@param s_data: dynamical factor data of type SData
@return: workspaces for list of atoms types, S for the particular type of atom
"""
s_data_extracted = s_data.extract()
shape = [self._num_quantum_order_events]
shape.extend(list(s_data_extracted["atom_0"]["s"]["order_1"].shape))
s_atom_data = np.zeros(shape=tuple(shape), dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
shape.pop(0)
num_atoms = len([key for key in s_data_extracted.keys() if "atom" in key])
temp_s_atom_data = np.copy(s_atom_data)
result = []
for atom_symbol in atoms_symbols:
# create partial workspaces for the given type of atom
atom_workspaces = []
s_atom_data.fill(0.0)
for atom in range(num_atoms):
if self._extracted_dft_data["atom_%s" % atom]["symbol"] == atom_symbol:
temp_s_atom_data.fill(0.0)
for order in range(AbinsModules.AbinsConstants.FUNDAMENTALS,
self._num_quantum_order_events + AbinsModules.AbinsConstants.S_LAST_INDEX):
order_indx = order - AbinsModules.AbinsConstants.PYTHON_INDEX_SHIFT
temp_s_order = s_data_extracted["atom_%s" % atom]["s"]["order_%s" % order]
temp_s_atom_data[order_indx] = temp_s_order
s_atom_data += temp_s_atom_data # sum S over the atoms of the same type
total_s_atom_data = np.sum(s_atom_data, axis=0)
atom_workspaces.append(
self._create_workspace(atom_name=atom_symbol, s_points=np.copy(total_s_atom_data),
optional_name="_total"))
atom_workspaces.append(
self._create_workspace(atom_name=atom_symbol, s_points=np.copy(s_atom_data)))
result.extend(atom_workspaces)
return result
def _create_partial_s_per_type_workspaces(self, atoms_symbols=None, s_data=None):
"""
Creates workspaces for all types of atoms. Each workspace stores quantum order events for S for the given
type of atom. It also stores total workspace for the given type of atom.
@param atoms_symbols: list of atom types for which quantum order events of S should be calculated
@param s_data: dynamical factor data of type SData
@return: workspaces for list of atoms types, each workspace contains quantum order events of
S for the particular atom type
"""
return self._create_workspaces(atoms_symbols=atoms_symbols, s_data=s_data)
def _fill_s_workspace(self, s_points=None, workspace=None, atom_name=None):
"""
Puts S into workspace(s).
@param s_points: dynamical factor for the given atom
@param workspace: workspace to be filled with S
"""
if self._instrument.get_name() in AbinsModules.AbinsConstants.ONE_DIMENSIONAL_INSTRUMENTS:
# only FUNDAMENTALS
if s_points.shape[0] == AbinsModules.AbinsConstants.FUNDAMENTALS:
self._fill_s_1d_workspace(s_points=s_points[0], workspace=workspace, atom_name=atom_name)
# total workspaces
elif len(s_points.shape) == AbinsModules.AbinsConstants.ONE_DIMENSIONAL_SPECTRUM:
self._fill_s_1d_workspace(s_points=s_points, workspace=workspace, atom_name=atom_name)
# quantum order events (fundamentals or overtones + combinations for the given order)
else:
dim = s_points.shape[0]
partial_wrk_names = []
for n in range(dim):
seed = "quantum_event_%s" % (n + 1)
wrk_name = workspace + "_" + seed
partial_wrk_names.append(wrk_name)
self._fill_s_1d_workspace(s_points=s_points[n], workspace=wrk_name, atom_name=atom_name)
GroupWorkspaces(InputWorkspaces=partial_wrk_names, OutputWorkspace=workspace)
def _fill_s_1d_workspace(self, s_points=None, workspace=None, atom_name=None):
"""
Puts 1D S into workspace.
:param s_points: dynamical factor for the given atom
:param workspace: workspace to be filled with S
:param atom_name: name of atom (for example H for hydrogen)
"""
if atom_name is not None:
width = AbinsModules.AbinsParameters.bin_width
s_points = s_points * self._scale * self._get_cross_section(atom_name=atom_name) * width
dim = 1
length = s_points.size
wrk = WorkspaceFactory.create("Workspace2D", NVectors=dim, XLength=length + 1, YLength=length)
wrk.setX(0, self._bins)
wrk.setY(0, s_points)
AnalysisDataService.addOrReplace(workspace, wrk)
# Set correct units on workspace
self._set_workspace_units(wrk=workspace)
def _get_cross_section(self, atom_name=None):
"""
Calculates cross section for the given element.
:param atom_name: symbol of element
:return: cross section for that element
"""
atom = Atom(symbol=atom_name)
cross_section = None
if self._scale_by_cross_section == 'Incoherent':
cross_section = atom.neutron()["inc_scatt_xs"]
elif self._scale_by_cross_section == 'Coherent':
cross_section = atom.neutron()["coh_scatt_xs"]
elif self._scale_by_cross_section == 'Total':
cross_section = atom.neutron()["tot_scatt_xs"]
return cross_section
def _create_total_workspace(self, partial_workspaces=None):
"""
Sets workspace with total S.
:param partial_workspaces: list of workspaces which should be summed up to obtain total workspace
:return: workspace with total S from partial_workspaces
"""
total_workspace = self._out_ws_name + "_total"
if isinstance(mtd[partial_workspaces[0]], WorkspaceGroup):
local_partial_workspaces = mtd[partial_workspaces[0]].names()
else:
local_partial_workspaces = partial_workspaces
if len(local_partial_workspaces) > 1:
# get frequencies
ws = mtd[local_partial_workspaces[0]]
# initialize S
s_atoms = np.zeros_like(ws.dataY(0))
# collect all S
for partial_ws in local_partial_workspaces:
if self._instrument.get_name() in AbinsModules.AbinsConstants.ONE_DIMENSIONAL_INSTRUMENTS:
s_atoms += mtd[partial_ws].dataY(0)
# create workspace with S
self._fill_s_workspace(s_atoms, total_workspace)
        # Otherwise just repackage the workspace we have as the total
else:
CloneWorkspace(InputWorkspace=local_partial_workspaces[0], OutputWorkspace=total_workspace)
return total_workspace
def _create_workspace(self, atom_name=None, s_points=None, optional_name=""):
"""
        Creates a workspace for the given s_points with S data. After the workspace is created it is rebinned,
        scaled by the cross-section factor and optionally multiplied by the user defined scaling factor.
        @param atom_name: symbol of atom for which workspace should be created
@param s_points: S(Q, omega)
@param optional_name: optional part of workspace name
@return: workspace for the given frequency and S data
"""
ws_name = self._out_ws_name + "_" + atom_name + optional_name
self._fill_s_workspace(s_points=s_points, workspace=ws_name, atom_name=atom_name)
return ws_name
def _create_experimental_data_workspace(self):
"""
Loads experimental data into workspaces.
@return: workspace with experimental data
"""
experimental_wrk = Load(self._experimental_file)
self._set_workspace_units(wrk=experimental_wrk.name())
return experimental_wrk
def _set_workspace_units(self, wrk=None):
"""
Sets x and y units for a workspace.
:param wrk: workspace which units should be set
"""
mtd[wrk].getAxis(0).setUnit("DeltaE_inWavenumber")
mtd[wrk].setYUnitLabel("S /Arbitrary Units")
mtd[wrk].setYUnit("Arbitrary Units")
def _check_advanced_parameter(self):
"""
Checks if parameters from AbinsParameters.py are valid. If any parameter is invalid then RuntimeError is thrown
with meaningful message.
"""
message = " in AbinsParameters.py. "
self._check_general_resolution(message)
self._check_tosca_parameters(message)
self._check_folder_names(message)
self._check_rebining(message)
self._check_threshold(message)
self._check_chunk_size(message)
self._check_threads(message)
def _check_general_resolution(self, message_end=None):
"""
        Checks general parameters used in constructing resolution functions.
:param message_end: closing part of the error message.
"""
# check fwhm
fwhm = AbinsModules.AbinsParameters.fwhm
if not (isinstance(fwhm, float) and 0.0 < fwhm < 10.0):
raise RuntimeError("Invalid value of fwhm" + message_end)
# check delta_width
delta_width = AbinsModules.AbinsParameters.delta_width
if not (isinstance(delta_width, float) and 0.0 < delta_width < 1.0):
raise RuntimeError("Invalid value of delta_width" + message_end)
def _check_tosca_parameters(self, message_end=None):
"""
Checks TOSCA parameters.
:param message_end: closing part of the error message.
"""
# TOSCA final energy in cm^-1
final_energy = AbinsModules.AbinsParameters.tosca_final_neutron_energy
if not (isinstance(final_energy, float) and final_energy > 0.0):
raise RuntimeError("Invalid value of final_neutron_energy for TOSCA" + message_end)
angle = AbinsModules.AbinsParameters.tosca_cos_scattering_angle
if not isinstance(angle, float):
raise RuntimeError("Invalid value of cosines scattering angle for TOSCA" + message_end)
resolution_const_a = AbinsModules.AbinsParameters.tosca_a
if not isinstance(resolution_const_a, float):
raise RuntimeError("Invalid value of constant A for TOSCA (used by the resolution TOSCA function)" +
message_end)
resolution_const_b = AbinsModules.AbinsParameters.tosca_b
if not isinstance(resolution_const_b, float):
raise RuntimeError("Invalid value of constant B for TOSCA (used by the resolution TOSCA function)" +
message_end)
resolution_const_c = AbinsModules.AbinsParameters.tosca_c
if not isinstance(resolution_const_c, float):
raise RuntimeError("Invalid value of constant C for TOSCA (used by the resolution TOSCA function)" +
message_end)
def _check_folder_names(self, message_end=None):
"""
Checks folders names.
:param message_end: closing part of the error message.
"""
folder_names = []
dft_group = AbinsModules.AbinsParameters.dft_group
if not isinstance(dft_group, str) or dft_group == "":
raise RuntimeError("Invalid name for folder in which the DFT data should be stored.")
folder_names.append(dft_group)
powder_data_group = AbinsModules.AbinsParameters.powder_data_group
if not isinstance(powder_data_group, str) or powder_data_group == "":
raise RuntimeError("Invalid value of powder_data_group" + message_end)
elif powder_data_group in folder_names:
raise RuntimeError("Name for powder_data_group already used by as name of another folder.")
folder_names.append(powder_data_group)
crystal_data_group = AbinsModules.AbinsParameters.crystal_data_group
if not isinstance(crystal_data_group, str) or crystal_data_group == "":
raise RuntimeError("Invalid value of crystal_data_group" + message_end)
elif crystal_data_group in folder_names:
raise RuntimeError("Name for crystal_data_group already used as a name of another folder.")
s_data_group = AbinsModules.AbinsParameters.s_data_group
if not isinstance(s_data_group, str) or s_data_group == "":
raise RuntimeError("Invalid value of s_data_group" + message_end)
elif s_data_group in folder_names:
raise RuntimeError("Name for s_data_group already used as a name of another folder.")
def _check_rebining(self, message_end=None):
"""
Checks rebinning parameters.
:param message_end: closing part of the error message.
"""
pkt_per_peak = AbinsModules.AbinsParameters.pkt_per_peak
if not (isinstance(pkt_per_peak, six.integer_types) and 1 <= pkt_per_peak <= 1000):
raise RuntimeError("Invalid value of pkt_per_peak" + message_end)
# bin width is expressed in cm^-1
bin_width = AbinsModules.AbinsParameters.bin_width
if not (isinstance(bin_width, float) and 1.0 <= bin_width <= 10.0):
raise RuntimeError("Invalid value of bin_width" + message_end)
min_wavenumber = AbinsModules.AbinsParameters.min_wavenumber
if not (isinstance(min_wavenumber, float) and min_wavenumber >= 0.0):
raise RuntimeError("Invalid value of min_wavenumber" + message_end)
max_wavenumber = AbinsModules.AbinsParameters.max_wavenumber
if not (isinstance(max_wavenumber, float) and max_wavenumber > 0.0):
raise RuntimeError("Invalid number of max_wavenumber" + message_end)
if min_wavenumber > max_wavenumber:
raise RuntimeError("Invalid energy window for rebinning.")
def _check_threshold(self, message_end=None):
"""
Checks acoustic phonon threshold.
:param message_end: closing part of the error message.
"""
acoustic_threshold = AbinsModules.AbinsParameters.acoustic_phonon_threshold
if not (isinstance(acoustic_threshold, float) and acoustic_threshold >= 0.0):
raise RuntimeError("Invalid value of acoustic_phonon_threshold" + message_end)
# check s threshold
s_absolute_threshold = AbinsModules.AbinsParameters.s_absolute_threshold
if not (isinstance(s_absolute_threshold, float) and s_absolute_threshold > 0.0):
raise RuntimeError("Invalid value of s_absolute_threshold" + message_end)
s_relative_threshold = AbinsModules.AbinsParameters.s_relative_threshold
if not (isinstance(s_relative_threshold, float) and s_relative_threshold > 0.0):
raise RuntimeError("Invalid value of s_relative_threshold" + message_end)
def _check_chunk_size(self, message_end=None):
"""
Check optimal size of chunk
:param message_end: closing part of the error message.
"""
optimal_size = AbinsModules.AbinsParameters.optimal_size
if not (isinstance(optimal_size, six.integer_types) and optimal_size > 0):
raise RuntimeError("Invalid value of optimal_size" + message_end)
def _check_threads(self, message_end=None):
"""
Checks number of threads
:param message_end: closing part of the error message.
"""
if PATHOS_FOUND:
threads = AbinsModules.AbinsParameters.threads
if not (isinstance(threads, six.integer_types) and 1 <= threads <= mp.cpu_count()):
raise RuntimeError("Invalid number of threads for parallelisation over atoms" + message_end)
def _validate_crystal_input_file(self, filename_full_path=None):
"""
Method to validate input file for CRYSTAL DFT program.
@param filename_full_path: full path of a file to check.
@return: True if file is valid otherwise false.
"""
logger.information("Validate CRYSTAL phonon file: ")
output = {"Invalid": False, "Comment": ""}
msg_err = "Invalid %s file. " % filename_full_path
msg_rename = "Please rename your file and try again."
# check extension of a file
filename_ext = os.path.splitext(filename_full_path)[1]
if filename_ext != ".out":
return dict(Invalid=True,
Comment=msg_err + "Output from DFT program " + self._dft_program + " is expected." +
" The expected extension of file is .out . (found: " + filename_ext + ") " +
msg_rename)
return output
def _validate_castep_input_file(self, filename_full_path=None):
"""
Check if input DFT phonon file has been produced by CASTEP. Currently the crucial keywords in the first few
lines are checked (to be modified if a better validation is found...)
:param filename_full_path: full path of a file to check
:return: Dictionary with two entries "Invalid", "Comment". Valid key can have two values: True/ False. As it
comes to "Comment" it is an empty string if Valid:True, otherwise stores description of the problem.
"""
logger.information("Validate CASTEP phonon file: ")
output = {"Invalid": False, "Comment": ""}
msg_err = "Invalid %s file. " % filename_full_path
msg_rename = "Please rename your file and try again."
# check extension of a file
filename_ext = os.path.splitext(filename_full_path)[1]
if filename_ext != ".phonon":
return dict(Invalid=True,
Comment=msg_err + "Output from DFT program " + self._dft_program + " is expected." +
" The expected extension of file is .phonon . (found: " + filename_ext + ") " +
msg_rename)
# check a structure of the header part of file.
# Here fortran convention is followed: case of letter does not matter
with open(filename_full_path) as castep_file:
line = self._get_one_line(castep_file)
if not self._compare_one_line(line, "beginheader"): # first line is BEGIN header
return dict(Invalid=True, Comment=msg_err + "The first line should be 'BEGIN header'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofions"):
return dict(Invalid=True, Comment=msg_err + "The second line should include 'Number of ions'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofbranches"):
return dict(Invalid=True, Comment=msg_err + "The third line should include 'Number of branches'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line, pattern="numberofwavevectors"):
return dict(Invalid=True, Comment=msg_err + "The fourth line should include 'Number of wavevectors'.")
line = self._get_one_line(castep_file)
if not self._compare_one_line(one_line=line,
pattern="frequenciesin"):
return dict(Invalid=True, Comment=msg_err + "The fifth line should be 'Frequencies in'.")
return output
def _get_one_line(self, file_obj=None):
"""
:param file_obj: file object from which reading is done
:return: string containing one non empty line
"""
line = file_obj.readline().replace(" ", "").lower()
        while line and line.strip() == "":
line = file_obj.readline().replace(" ", "").lower()
return line
def _compare_one_line(self, one_line, pattern):
"""
        Compares a line, in the form of a string, with a pattern.
        :param one_line: line in the form of a string to be compared
        :param pattern: string which should be present in the line after removing white spaces and setting all
                        letters to lower case
        :return: True if the pattern is present in the line, otherwise False
"""
return one_line and pattern in one_line.replace(" ", "")
def _get_properties(self):
"""
Loads all properties to object's attributes.
"""
self._dft_program = self.getProperty("DFTprogram").value
self._phonon_file = self.getProperty("PhononFile").value
self._experimental_file = self.getProperty("ExperimentalFile").value
self._temperature = self.getProperty("Temperature").value
self._scale = self.getProperty("Scale").value
self._sample_form = self.getProperty("SampleForm").value
instrument_name = self.getProperty("Instrument").value
if instrument_name in AbinsModules.AbinsConstants.ALL_INSTRUMENTS:
self._instrument_name = instrument_name
instrument_producer = AbinsModules.InstrumentProducer()
self._instrument = instrument_producer.produce_instrument(name=self._instrument_name)
else:
raise ValueError("Unknown instrument %s" % instrument_name)
self._atoms = self.getProperty("Atoms").value
self._sum_contributions = self.getProperty("SumContributions").value
# conversion from str to int
self._num_quantum_order_events = int(self.getProperty("QuantumOrderEventsNumber").value)
self._scale_by_cross_section = self.getPropertyValue('ScaleByCrossSection')
self._out_ws_name = self.getPropertyValue('OutputWorkspace')
self._calc_partial = (len(self._atoms) > 0)
# user defined interval is exclusive with respect to
# AbinsModules.AbinsParameters.min_wavenumber
# AbinsModules.AbinsParameters.max_wavenumber
# with bin width AbinsModules.AbinsParameters.bin_width
step = AbinsModules.AbinsParameters.bin_width
start = AbinsModules.AbinsParameters.min_wavenumber + step / 2.0
stop = AbinsModules.AbinsParameters.max_wavenumber + step / 2.0
self._bins = np.arange(start=start, stop=stop, step=step, dtype=AbinsModules.AbinsConstants.FLOAT_TYPE)
try:
AlgorithmFactory.subscribe(Abins)
except ImportError:
    logger.debug('Failed to subscribe algorithm Abins; the python package may be missing.')
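
# The workspaces built above live on a uniform wavenumber grid whose points are
# offset by half a bin width (see _get_properties).  The helper below is a hedged,
# illustration-only sketch of the underlying rebinning idea in plain numpy:
# discrete (frequency, intensity) pairs are summed into such a grid.  It is not
# called by the algorithm and the default numbers are placeholders, not values
# taken from AbinsParameters.
def _rebin_example(frequencies, intensities, v_min=0.0, v_max=4000.0, width=1.0):
    """Sum discrete (frequency, intensity) pairs onto a uniform wavenumber grid."""
    edges = np.arange(v_min, v_max + width, width)
    spectrum, _ = np.histogram(frequencies, bins=edges, weights=intensities)
    centres = edges[:-1] + width / 2.0
    return centres, spectrum
# Example use (not executed on import):
#   centres, s = _rebin_example(np.array([10.5, 10.7, 250.0]), np.array([0.2, 0.1, 1.0]))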
|
wdzhou/mantid
|
Framework/PythonInterface/plugins/algorithms/Abins.py
|
Python
|
gpl-3.0
| 34,058
|
[
"CASTEP",
"CRYSTAL"
] |
0e3612e57454ca828d49d4f236eb78396f0624b4f0c4c13635c454df5cdf90b6
|
#!/usr/bin/env python -Es
"""
Script to set up a custom genome for bcbio-nextgen
"""
import argparse
from argparse import ArgumentParser
import os
import toolz as tz
from bcbio.utils import safe_makedir, file_exists, chdir
from bcbio.pipeline import config_utils
from bcbio.distributed.transaction import file_transaction
from bcbio.provenance import do
from bcbio.install import (REMOTES, get_cloudbiolinux, SUPPORTED_GENOMES, SUPPORTED_INDEXES,
_get_data_dir)
from bcbio.galaxy import loc
from fabric.api import *
import subprocess
import sys
import shutil
import yaml
import gffutils
from gffutils.iterators import DataIterator
import tempfile
SEQ_DIR = "seq"
RNASEQ_DIR = "rnaseq"
SRNASEQ_DIR = "srnaseq"
ERCC_BUCKET = "bcbio-data.s3.amazonaws.com/"
def gff3_to_gtf(gff3_file):
dialect = {'field separator': '; ',
'fmt': 'gtf',
'keyval separator': ' ',
'leading semicolon': False,
'multival separator': ',',
'quoted GFF2 values': True,
'order': ['gene_id', 'transcript_id'],
'repeated keys': False,
'trailing semicolon': True}
out_file = os.path.splitext(gff3_file)[0] + ".gtf"
if file_exists(out_file):
return out_file
print "Converting %s to %s." %(gff3_file, out_file)
db = gffutils.create_db(gff3_file, ":memory:")
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
for feature in DataIterator(db.features_of_type("exon"), dialect=dialect):
transcript_id = feature["Parent"][0]
gene_id = db[transcript_id]["Parent"][0]
attr = {"transcript_id": transcript_id, "gene_id": gene_id}
attributes = gffutils.attributes.Attributes(attr)
feature.attributes = attributes
print >> out_handle, feature
return out_file
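
# gff3_to_gtf() above relies on the gffutils dialect to emit GTF-style attribute
# strings.  As a hedged, dependency-free illustration of the attribute layout that
# dialect produces (gene_id first, then transcript_id, each quoted and terminated
# by a semicolon), the helper below formats one attribute column by hand; it is
# not used anywhere else in this script.
def example_gtf_attributes(gene_id, transcript_id):
    """Return a GTF attribute column such as: gene_id "g1"; transcript_id "t1";"""
    return 'gene_id "%s"; transcript_id "%s";' % (gene_id, transcript_id)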
def _index_w_command(dir_name, command, ref_file, ext=None):
index_name = os.path.splitext(os.path.basename(ref_file))[0]
if ext is not None: index_name += ext
build_path = os.path.join(os.path.dirname(ref_file), os.pardir)
out_dir = os.path.join(build_path, dir_name)
index_path = os.path.join(out_dir, index_name)
if not env.safe_exists(out_dir):
env.safe_run("mkdir %s" % out_dir)
subprocess.check_call(command.format(ref_file=ref_file,
index_name=index_path), shell=True)
return index_path
def setup_base_directories(genome_dir, name, build, gtf=None):
name_dir = os.path.join(genome_dir, name)
safe_makedir(name_dir)
build_dir = os.path.join(name_dir, build)
safe_makedir(build_dir)
seq_dir = os.path.join(build_dir, SEQ_DIR)
safe_makedir(seq_dir)
if gtf:
gtf_dir = os.path.join(build_dir, RNASEQ_DIR)
safe_makedir(gtf_dir)
return build_dir
def install_fasta_file(build_dir, fasta, build):
out_file = os.path.join(build_dir, SEQ_DIR, build + ".fa")
if not os.path.exists(out_file):
shutil.copyfile(fasta, out_file)
return out_file
def install_gtf_file(build_dir, gtf, build):
out_file = os.path.join(build_dir, RNASEQ_DIR, "ref-transcripts.gtf")
if not os.path.exists(out_file):
shutil.copyfile(gtf, out_file)
return out_file
def install_srna(species, gtf):
out_file = os.path.join(SRNASEQ_DIR, "srna-transcripts.gtf")
safe_makedir(SRNASEQ_DIR)
if not os.path.exists(out_file):
shutil.copyfile(gtf, out_file)
try:
from seqcluster import install
except ImportError:
raise ImportError("install seqcluster first, please.")
with chdir(SRNASEQ_DIR):
hairpin, miRNA = install._install_mirbase()
cmd = ("grep -A 2 {species} {hairpin} | grep -v '\-\-$' | tr U T > hairpin.fa")
do.run(cmd.format(**locals()), "set precursor.")
cmd = ("grep -A 1 {species} {miRNA} > miRNA.str")
do.run(cmd.format(**locals()), "set miRNA.")
shutil.rmtree("mirbase")
return out_file
def append_ercc(gtf_file, fasta_file):
ercc_fa = ERCC_BUCKET + "ERCC92.fasta.gz"
tmp_fa = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_fa_cmd = "wget {ercc_fa} -O {tmp_fa}; gzip -cd {tmp_fa} >> {fasta_file}"
print append_fa_cmd.format(**locals())
subprocess.check_call(append_fa_cmd.format(**locals()), shell=True)
ercc_gtf = ERCC_BUCKET + "ERCC92.gtf.gz"
tmp_gtf = tempfile.NamedTemporaryFile(delete=False, suffix=".gz").name
append_gtf_cmd = "wget {ercc_gtf} -O {tmp_gtf}; gzip -cd {tmp_gtf} >> {gtf_file}"
print append_gtf_cmd.format(**locals())
subprocess.check_call(append_gtf_cmd.format(**locals()), shell=True)
if __name__ == "__main__":
description = ("Set up a custom genome for bcbio-nextgen. This will "
"place the genome under name/build in the genomes "
"directory in your bcbio-nextgen installation.")
parser = ArgumentParser(description=description)
parser.add_argument("-f", "--fasta", required=True,
help="FASTA file of the genome.")
parser.add_argument("--gff3", default=False, action='store_true',
help="File is a GFF3 file.")
parser.add_argument("-g", "--gtf", default=None,
help="GTF file of the transcriptome")
parser.add_argument("-n", "--name", required=True,
help="Name of organism, for example Hsapiens.")
parser.add_argument("-b", "--build", required=True,
help="Build of genome, for example hg19.")
parser.add_argument("-i", "--indexes", choices=SUPPORTED_INDEXES, nargs="*",
default=["seq"], help="Space separated list of indexes to make")
parser.add_argument("--ercc", action='store_true', default=False,
help="Add ERCC spike-ins.")
parser.add_argument("--mirbase", help="species in mirbase for smallRNAseq data.")
parser.add_argument("--srna_gtf", help="gtf to use for smallRNAseq data.")
args = parser.parse_args()
if not all([args.mirbase, args.srna_gtf]) and any([args.mirbase, args.srna_gtf]):
raise ValueError("--mirbase and --srna_gtf both need a value.")
env.hosts = ["localhost"]
os.environ["PATH"] += os.pathsep + os.path.dirname(sys.executable)
cbl = get_cloudbiolinux(REMOTES)
sys.path.insert(0, cbl["dir"])
genomemod = __import__("cloudbio.biodata", fromlist=["genomes"])
# monkey patch cloudbiolinux to use this indexing command instead
genomes = getattr(genomemod, 'genomes')
genomes._index_w_command = _index_w_command
fabmod = __import__("cloudbio", fromlist=["fabutils"])
fabutils = getattr(fabmod, 'fabutils')
fabutils.configure_runsudo(env)
system_config = os.path.join(_get_data_dir(), "galaxy", "bcbio_system.yaml")
with open(system_config) as in_handle:
config = yaml.load(in_handle)
env.picard_home = config_utils.get_program("picard", config, ptype="dir")
genome_dir = os.path.abspath(os.path.join(_get_data_dir(), "genomes"))
args.fasta = os.path.abspath(args.fasta)
args.gtf = os.path.abspath(args.gtf) if args.gtf else None
if args.gff3:
args.gtf = gff3_to_gtf(args.gtf)
# always make a sequence dictionary
if "seq" not in args.indexes:
args.indexes.append("seq")
env.system_install = genome_dir
prepare_tx = os.path.join(cbl["dir"], "utils", "prepare_tx_gff.py")
print "Creating directories using %s as the base." % (genome_dir)
build_dir = setup_base_directories(genome_dir, args.name, args.build, args.gtf)
os.chdir(build_dir)
print "Genomes will be installed into %s." % (build_dir)
fasta_file = install_fasta_file(build_dir, args.fasta, args.build)
print "Installed genome as %s." % (fasta_file)
if args.gtf:
if "bowtie2" not in args.indexes:
args.indexes.append("bowtie2")
gtf_file = install_gtf_file(build_dir, args.gtf, args.build)
print "Installed GTF as %s." % (gtf_file)
if args.ercc:
print "Appending ERCC sequences to %s and %s." % (gtf_file, fasta_file)
append_ercc(gtf_file, fasta_file)
indexed = {}
for index in args.indexes:
print "Creating the %s index." % (index)
index_fn = genomes.get_index_fn(index)
if not index_fn:
print "Do not know how to make the index %s, skipping." % (index)
continue
indexed[index] = index_fn(fasta_file)
indexed["samtools"] = fasta_file
if args.gtf:
"Preparing transcriptome."
with chdir(os.path.join(build_dir, os.pardir)):
cmd = ("{sys.executable} {prepare_tx} --genome-dir {genome_dir} --gtf {gtf_file} {args.name} {args.build}")
subprocess.check_call(cmd.format(**locals()), shell=True)
if args.mirbase:
"Preparing smallRNA data."
with chdir(os.path.join(build_dir)):
install_srna(args.mirbase, args.srna_gtf)
base_dir = os.path.normpath(os.path.dirname(fasta_file))
resource_file = os.path.join(base_dir, "%s-resources.yaml" % args.build)
print "Dumping genome resources to %s." % resource_file
resource_dict = {"version": 1}
if args.gtf:
transcripts = ["rnaseq", "transcripts"]
mask = ["rnaseq", "transcripts_mask"]
index = ["rnaseq", "transcriptome_index", "tophat"]
dexseq = ["rnaseq", "dexseq"]
refflat = ["rnaseq", "refflat"]
rRNA_fa = ["rnaseq", "rRNA_fa"]
resource_dict = tz.update_in(resource_dict, transcripts,
lambda x: "../rnaseq/ref-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, mask,
lambda x: "../rnaseq/ref-transcripts-mask.gtf")
resource_dict = tz.update_in(resource_dict, index,
lambda x: "../rnaseq/tophat/%s_transcriptome.ver" % args.build)
resource_dict = tz.update_in(resource_dict, refflat,
lambda x: "../rnaseq/ref-transcripts.refFlat")
resource_dict = tz.update_in(resource_dict, dexseq,
lambda x: "../rnaseq/ref-transcripts.dexseq.gff3")
resource_dict = tz.update_in(resource_dict, rRNA_fa,
lambda x: "../rnaseq/rRNA.fa")
if args.mirbase:
srna_gtf = ["srnaseq", "srna-transcripts"]
srna_mirbase = ["srnaseq", "mirbase"]
resource_dict = tz.update_in(resource_dict, srna_gtf,
lambda x: "../srnaseq/srna-transcripts.gtf")
resource_dict = tz.update_in(resource_dict, srna_mirbase,
lambda x: "../srnaseq/hairpin.fa")
    # write out resource dictionary
with file_transaction(resource_file) as tx_resource_file:
with open(tx_resource_file, "w") as out_handle:
out_handle.write(yaml.dump(resource_dict, default_flow_style=False))
print "Updating Galaxy .loc files."
galaxy_base = os.path.join(_get_data_dir(), "galaxy")
for index, index_file in indexed.items():
loc.update_loc_file(galaxy_base, index, args.build, index_file)
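    # Illustrative sketch (not part of the original workflow): this is roughly what
    # a single tz.update_in call above produces.  It assumes tz is the toolz module
    # imported at the top of this file; update_in creates the nested key path on
    # demand and returns a new dict, which is why resource_dict is reassigned each time.
    _example_resources = tz.update_in({"version": 1}, ["rnaseq", "transcripts"],
                                      lambda x: "../rnaseq/ref-transcripts.gtf")
    # _example_resources == {"version": 1,
    #                        "rnaseq": {"transcripts": "../rnaseq/ref-transcripts.gtf"}}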
|
lpantano/bcbio-nextgen
|
scripts/bcbio_setup_genome.py
|
Python
|
mit
| 11,347
|
[
"Galaxy"
] |
c7281d606187e7517b7ba9267b3791de5d2d22cee2adef70c40f0fd07712a81a
|
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 17 17:57:40 2016
@author: jdorvinen
"""
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# <codecell>
# Model data fit: alpha=1.498, beta=-0.348, gamma=1.275
# Callaghan et al. used: alpha=21.46, beta=1.08, gamma=1.07
a = 12*1.498
b = 12*-0.348
c = 12*1.275
a_c = 21.46
b_c = 1.08
c_c = 1.07
w = 2*np.pi
rnv = np.random.random()
# Takes a random variable and can be used to find a value for Gi
# formulaG = '1 - np.exp(-(a*w*Gi \
# + b*(np.cos(w*te) - np.cos(w*(te + Gi))) \
# - c*(np.sin(w*te) - np.sin(w*(te + Gi))))/w)'
formulaG = '1 - np.exp(-({0}*w*Gi \
+ {1}*(np.cos(w*te) - np.cos(w*(te + Gi))) \
- {2}*(np.sin(w*te) - np.sin(w*(te + Gi))))/w)'
# Initial estimate of Gi. Obtained from the second order Taylor series
# expansion about Gi=0 of "formulaG"
formulaGi_0 = 'rnv / (a + b*np.sin(w*te[i-1]) + c*np.cos(w*te[i-1]))'
def func(te,Gi,a,b,c):
z = eval(formulaG.format(a,b,c))
return z
te = np.arange(0,1.01,0.01)
Gi = np.arange(0,1.01,0.01)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X, Y = np.meshgrid(te, Gi)
zs = np.array([func(te,Gi,a,b,c) for te,Gi in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
zs2 = np.array([func(te,Gi,a_c,b_c,c_c) for te,Gi in zip(np.ravel(X), np.ravel(Y))])
Z2 = zs2.reshape(X.shape)
#from mayavi import mlab
#s1 = mlab.mesh(X,Y,Z)
#s2 = mlab.mesh(X,Y,Z2)
#mlab.show
ax.plot_surface(X,Y,Z,
cmap = 'viridis_r',
rstride=1,
cstride=10,
alpha=1,
zorder=0,
linewidth=0)
#ax.plot_surface(X,Y,Z2, color='yellow', alpha=1, zorder=1)
ax.set_xlabel('TimeEnd')
ax.set_ylabel('Gi')
ax.set_zlabel('RNV')
plt.show()
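# A minimal sketch (not from the original script) of how the fitted distribution
# above could be inverted to draw one inter-storm gap Gi: solve G(te, Gi) = rnv
# for Gi with a bracketing root finder.  It assumes scipy is available, reuses
# the fitted a, b, c defined above, and the upper bracket of 10 cycles is an
# assumption chosen to contain the root.
from scipy.optimize import brentq

def sample_gap(te_end, rnv_draw, upper=10.0):
    def cdf(Gi):
        return 1 - np.exp(-(a*w*Gi
                            + b*(np.cos(w*te_end) - np.cos(w*(te_end + Gi)))
                            - c*(np.sin(w*te_end) - np.sin(w*(te_end + Gi))))/w)
    return brentq(lambda Gi: cdf(Gi) - rnv_draw, 0.0, upper)

# Example draw: sample_gap(0.25, np.random.random())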
|
jdorvi/MonteCarlos_SLC
|
calculate_gap.py
|
Python
|
mit
| 1,836
|
[
"Mayavi"
] |
370966dc87dedb9e0e1d25bb15282eb2a388e140cfbebd337868a79100d88cad
|
# Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions to initialize Tensor using a NumPy-like syntax."""
import warnings
from typing import Optional, Sequence, Tuple, Any, Union, Type, Callable, List
from typing import Text
import numpy as np
from tensornetwork.backends import abstract_backend
from tensornetwork import backend_contextmanager
from tensornetwork import backends
from tensornetwork.tensor import Tensor
AbstractBackend = abstract_backend.AbstractBackend
def initialize_tensor(fname: Text,
*fargs: Any,
backend: Optional[Union[Text, AbstractBackend]] = None,
**fkwargs: Any) -> Tensor:
"""Return a Tensor wrapping data obtained by an initialization function
implemented in a backend. The Tensor will have the same shape as the
underlying array that function generates, with all Edges dangling.
This function is not intended to be called directly, but doing so should
be safe enough.
Args:
fname: Name of the method of backend to call (a string).
*fargs: Positional arguments to the initialization method.
backend: The backend or its name.
**fkwargs: Keyword arguments to the initialization method.
Returns:
tensor: A Tensor wrapping data generated by
(the_backend).fname(*fargs, **fkwargs), with one dangling edge per
axis of data.
"""
if backend is None:
backend = backend_contextmanager.get_default_backend()
backend_obj = backends.backend_factory.get_backend(backend)
func = getattr(backend_obj, fname)
data = func(*fargs, **fkwargs)
tensor = Tensor(data, backend=backend)
return tensor
def eye(N: int,
dtype: Optional[Type[np.number]] = None,
M: Optional[int] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor representing a 2D array with ones on the diagonal and
zeros elsewhere. The Tensor has two dangling Edges.
Args:
N (int): The first dimension of the returned matrix.
dtype, optional: dtype of array (default np.float64).
M (int, optional): The second dimension of the returned matrix.
backend (optional): The backend or its name.
Returns:
I : Tensor of shape (N, M)
      Represents an array of all zeros except for the main diagonal of all
ones.
"""
the_tensor = initialize_tensor("eye", N, backend=backend, dtype=dtype, M=M)
return the_tensor
def zeros(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor of shape `shape` of all zeros.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape`. Represents an array of all zeros.
"""
the_tensor = initialize_tensor("zeros", shape, backend=backend, dtype=dtype)
return the_tensor
def ones(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor of shape `shape` of all ones.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape`
Represents an array of all ones.
"""
the_tensor = initialize_tensor("ones", shape, backend=backend, dtype=dtype)
return the_tensor
def ones_like(tensor: Union[Any],
dtype: Optional[Type[Any]] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor shape full of ones the same shape as input
Args:
    tensor : Object to receive shape from
dtype (optional) : dtype of object
backend(optional): The backend or its name."""
if backend is None:
backend = backend_contextmanager.get_default_backend()
else:
backend = backend_contextmanager.backend_factory.get_backend(backend)
if isinstance(tensor, Tensor):
the_tensor = initialize_tensor("ones", tensor.shape,
backend=tensor.backend, dtype=tensor.dtype)
else:
try:
tensor = backend.convert_to_tensor(tensor)
except TypeError as e:
error = "Input to zeros_like has invalid type causing " \
"error massage: \n" + str(e)
raise TypeError(error) from e
the_tensor = initialize_tensor("ones", tensor.get_shape().as_list(),
backend=backend, dtype=dtype)
return the_tensor
def zeros_like(tensor: Union[Any],
dtype: Optional[Any] = None,
backend: Optional[Union[Text,
AbstractBackend]] = None) -> Tensor:
"""Return a Tensor shape full of zeros the same shape as input
Args:
    tensor : Object to receive shape from
dtype (optional) : dtype of object
backend(optional): The backend or its name."""
if backend is None:
backend = backend_contextmanager.get_default_backend()
else:
backend = backend_contextmanager.backend_factory.get_backend(backend)
if isinstance(tensor, Tensor):
the_tensor = initialize_tensor("zeros", tensor.shape,
backend=tensor.backend, dtype=tensor.dtype)
else:
try:
tensor = backend.convert_to_tensor(tensor)
except TypeError as e:
error = "Input to zeros_like has invalid " \
"type causing error massage: \n" + str(e)
raise TypeError(error) from e
the_tensor = initialize_tensor("zeros", tensor.shape,
backend=backend, dtype=dtype)
return the_tensor
def randn(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
backend: Optional[Union[Text, AbstractBackend]] = None) -> Tensor:
"""Return a Tensor of shape `shape` of Gaussian random floats.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape` filled with Gaussian random data.
"""
the_tensor = initialize_tensor("randn", shape, backend=backend, seed=seed,
dtype=dtype)
return the_tensor
def random_uniform(shape: Sequence[int],
dtype: Optional[Type[np.number]] = None,
seed: Optional[int] = None,
boundaries: Optional[Tuple[float, float]] = (0.0, 1.0),
backend: Optional[Union[Text, AbstractBackend]]
= None) -> Tensor:
"""Return a Tensor of shape `shape` of uniform random floats.
The Tensor has one dangling Edge per dimension.
Args:
shape : Shape of the array.
dtype, optional: dtype of array (default np.float64).
seed, optional: Seed for the RNG.
boundaries : Values lie in [boundaries[0], boundaries[1]).
backend (optional): The backend or its name.
Returns:
the_tensor : Tensor of shape `shape` filled with uniform random data.
"""
the_tensor = initialize_tensor("random_uniform", shape, backend=backend,
seed=seed, boundaries=boundaries, dtype=dtype)
return the_tensor
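# A short usage sketch (not part of the library itself): with the NumPy backend
# these helpers mirror their numpy counterparts but return Tensor objects with
# one dangling edge per axis.  It assumes the "numpy" backend is available,
# which it is by default; the guard keeps import-time behaviour unchanged.
if __name__ == "__main__":
  a = zeros((2, 3), dtype=np.float64, backend="numpy")
  b = randn((2, 3), seed=42, backend="numpy")
  print(a.shape, b.shape)  # (2, 3) (2, 3)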
|
google/TensorNetwork
|
tensornetwork/linalg/initialization.py
|
Python
|
apache-2.0
| 8,131
|
[
"Gaussian"
] |
d3ff28236a1b8027a1fb7b97b238e8be24f4398059d7e2f69d063f1ad20bc960
|
##################################################################
# SSURGO_to_csv.py Apr 2015
# ritvik sahajpal (ritvik@umd.edu)
#
##################################################################
import constants, logging, os, us, csv, pdb, glob
import numpy as np
import pandas as pd
def open_or_die(path_file, perm='r', header=None, sep=' ', delimiter=' ', usecols=[]):
"""
Open file or quit gracefully
:param path_file: Path of file to open
    :return: pandas dataframe read from the delimited text file (only .txt inputs are handled here)
"""
try:
if os.path.splitext(path_file)[1] == '.txt':
df = pd.read_csv(path_file, sep=sep, header=header, usecols=usecols)
return df
else:
logging.info('Invalid file type')
except:
logging.info('Error opening file '+path_file)
def component_aggregation(group):
# Sort by depth, makes it easier to process later
group.sort('hzdept_r',inplace=True)
# Determine number of soil layers
list_depths = np.append(group['hzdepb_r'],group['hzdept_r'])
num_layers = len(np.unique(list_depths))-1 # Exclude 0
if(num_layers <= 0):
logging.warn('Incorrect number of soil layers '+str(num_layers)+' '+str(group['cokey']))
return
return group
def read_ssurgo_tables(soil_dir):
# Read in SSURGO data
pd_mapunit = open_or_die(soil_dir+os.sep+constants.MAPUNIT+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.mapunit_vars.keys())
pd_component = open_or_die(soil_dir+os.sep+constants.COMPONENT+'.txt',sep=constants.SSURGO_SEP,header=None,usecols=constants.component_vars.keys())
pd_chorizon = open_or_die(soil_dir+os.sep+constants.CHORIZON+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.chorizon_vars.keys())
pd_muaggatt = open_or_die(soil_dir+os.sep+constants.MUAGGATT+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.muaggatt_vars.keys())
pd_chfrags = open_or_die(soil_dir+os.sep+constants.CHFRAGS+'.txt' ,sep=constants.SSURGO_SEP,header=None,usecols=constants.chfrags_vars.keys())
# if any of the dataframes are empty then return a error value
if ((pd_mapunit is None) or (pd_component is None) or (pd_chorizon is None) or (pd_muaggatt is None) or (pd_chfrags is None)):
raise ValueError('Empty dataframe from one of SSURGO files')
# Rename dataframe columns from integers to SSURGO specific names
pd_mapunit.rename(columns=constants.mapunit_vars ,inplace=True)
pd_component.rename(columns=constants.component_vars,inplace=True)
pd_chorizon.rename(columns=constants.chorizon_vars ,inplace=True)
pd_muaggatt.rename(columns=constants.muaggatt_vars ,inplace=True)
pd_chfrags.rename(columns=constants.chfrags_vars ,inplace=True)
# Sum up Fragvol_r in pd_chfrags
# See http://www.nrel.colostate.edu/wiki/nri/images/2/21/Workflow_NRI_SSURGO_2010.pdf
pd_chfrags = pd_chfrags.groupby('chkey').sum().reset_index(level=0)
# Aggregate pd_chorizon data based on cokey
chorizon_agg = pd_chorizon.groupby('cokey').apply(component_aggregation)
# Join chfrags and chorizon_agg data
chfrags_chor = chorizon_agg.merge(pd_chfrags,left_on='chkey',right_on='chkey')
# Join chfrags_chor data to the component table
ccomp = chfrags_chor.merge(pd_component,left_on='cokey',right_on='cokey')
# Join the chor_comp data to pd_muaggatt table
# Set how='outer' since we do not want to miss any mukey's
muag_ccomp = ccomp.merge(pd_muaggatt,left_on='mukey',right_on='mukey', how='outer')
# Join muag_ccomp to mapunit data
# Set how='outer' since we do not want to miss any mukey's
map_data = muag_ccomp.merge(pd_mapunit,left_on='mukey',right_on='mukey', how='outer')
return map_data
def SSURGO_to_csv():
sgo_data = pd.DataFrame()
for st in constants.list_st:
logging.info(st)
# For each state, process the SSURGO tabular files
for dir_name, subdir_list, file_list in os.walk(constants.data_dir):
if('_'+st+'_' in dir_name and constants.TABULAR in subdir_list):
logging.info(dir_name[-3:]) # County FIPS code
try:
tmp_df = read_ssurgo_tables(dir_name+os.sep+constants.TABULAR)
except ValueError:
logging.info('Empty dataframe from one of SSURGO files')
continue
tmp_df['state'] = st
tmp_df['county'] = dir_name[-3:]
tmp_df['FIPS'] = int(us.states.lookup(st).fips+dir_name[-3:])
sgo_data = pd.concat([tmp_df,sgo_data],ignore_index =True)
# Drop columns with all missing values
sgo_data.dropna(axis=1,how='all',inplace=True)
# Replace hydgrp values with integers
sgo_data.replace(constants.hydgrp_vars,inplace=True)
# If any null values exist, replace with mean of value in mukey
df3 = pd.DataFrame()
logging.info('If any null values exist, replace with mean of value in mukey')
if(np.any(sgo_data.isnull())):
df1 = sgo_data.set_index('mukey')
df2 = sgo_data.groupby('mukey').mean()
df3 = df1.combine_first(df2)
# If any null values remain, replace by county mean
logging.info('If any null values remain, replace by county mean')
if(np.any(df3.isnull())):
df1 = df3.reset_index().set_index('FIPS')
cnt_mean = sgo_data.groupby(['FIPS']).mean()
df3 = df1.combine_first(cnt_mean)
else:
pass
# If any null values remain, replace by state mean
logging.info('If any null values remain, replace by state mean')
if(np.any(df3.isnull())):
df1 = df3.reset_index().set_index('state')
st_mean = sgo_data.groupby(['state']).mean()
df3 = df1.combine_first(st_mean)
else:
pass
else:
pass
df3.reset_index(inplace=True)
# Convert niccdcd and hydgrp to integers
df3['hydgrp'] = df3['hydgrp'].astype(int)
df3['niccdcd'] = df3['niccdcd'].astype(int)
# Drop components with non zero initial depth
#logging.info('Drop faulty components')
#drop_df = df3.groupby('cokey').filter(lambda x: x['hzdept_r'].min() <= 0)
logging.info('Select the dominant component')
dom_df = df3.groupby('mukey').apply(lambda g: g[g['comppct_r']==g['comppct_r'].max()])
#drop_df.to_csv(constants.out_dir+'drop.csv')
out_ssurgo_dir = constants.r_soil_dir+os.sep+constants.SOIL+os.sep
constants.make_dir_if_missing(out_ssurgo_dir)
df3.to_csv(out_ssurgo_dir+os.sep+constants.all)
dom_df.to_csv(out_ssurgo_dir+os.sep+constants.dominant)
logging.info('Done!')
return dom_df
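# Tiny illustration (not used by the pipeline) of the combine_first cascade in
# SSURGO_to_csv above: indexing by mukey and combining with the per-mukey mean
# fills gaps from the group average; the same pattern is then repeated at the
# county (FIPS) and state level.  The values below are made up.
def _combine_first_demo():
    toy = pd.DataFrame({'mukey': [1, 1, 2],
                        'sandtotal_r': [40.0, np.nan, 55.0]})
    filled = toy.set_index('mukey').combine_first(toy.groupby('mukey').mean())
    return filled  # the NaN row for mukey 1 is filled with 40.0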
def write_epic_soil_file(group):
if(not(os.path.isfile(constants.t_soil_dir+str(int(group.mukey.iloc[0]))+'.sol'))):
epic_file = open(constants.t_soil_dir+str(int(group.mukey.iloc[0]))+'.sol', 'w')
num_layers = len(group.hzdepb_r)
# Line 1
epic_file.write(str(group.mukey.iloc[0])+' State: '+str(group.state.iloc[0])+' FIPS: '+str(group.FIPS.iloc[0])+'\n')
# Line 2
epic_file.write(('{:8.2f}'*10+'\n').format(group.albedodry_r.iloc[0],group.hydgrp.iloc[0],0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0))
# Line 3
epic_file.write(('{:8.2f}'*9+'\n').format(0.0,0.0,100.0,0.0,0.0,0.0,0.0,0.0,0.0))
# Soil characteristics per soil layer
epic_file.write(''.join(['{:8.2f}'.format(n*constants.CONV_DEPTH) for n in group.hzdepb_r])+'\n') # Depth to bottom of layer (m)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.dbthirdbar_r])+'\n') # Bulk Density (T/m^3)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.wfifteenbar_r])+'\n') # Soil water content at wilting point (1500 KPA), (m/m)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.wthirdbar_r])+'\n') # Water content at field capacity (33 KPA), (m/m)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.sandtotal_r])+'\n') # Sand content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.silttotal_r])+'\n') # Silt content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Initial Org N concentration (g/T) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.ph1to1h2o_r])+'\n') # Soil pH ()
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.sumbases_r])+'\n') # Sum of bases (cmol/kg)
epic_file.write(''.join(['{:8.2f}'.format(n*constants.OM_TO_WOC) for n in group.om_r])+'\n') # Organic matter content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.caco3_r])+'\n') # CaCO3 content (%)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.cec7_r])+'\n') # Cation exchange capacity (cmol/kg)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.Fragvol_r])+'\n') # Coarse fragment content (% by vol)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Initial NO3 conc (g/T) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Initial Labile P (g/T) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # Crop residue (T/ha) ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n) for n in group.dbovendry_r])+'\n') # Oven dry Bulk Density (T/m^3)
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n') # ---zeros---
epic_file.write(''.join(['{:8.2f}'.format(n*constants.CONV_KSAT) for n in group.ksat_r])+'\n') # Saturated conductivity (mm/h)
for i in range(constants.ZERO_LINES):
epic_file.write(''.join(['{:8.2f}'.format(n) for n in np.zeros(num_layers)])+'\n')
# EPIC constant lines
epic_file.write('\n\n\n')
epic_file.write(' 275. 200. 150. 140. 130. 120. 110.\n')
epic_file.write(' 0.20 0.40 0.50 0.60 0.80 1.00 1.20\n')
epic_file.write(' .004 .006 .008 .009 .010 .010 .010\n')
epic_file.close()
else:
logging.info('File exists: '+constants.t_soil_dir+str(group.mukey.iloc[0])+'.sol')
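# Illustration (values made up) of the fixed-width layer formatting used above:
# every value is rendered as an 8-character, 2-decimal field so the EPIC .sol
# reader can parse the columns positionally.
def _format_layer_demo():
    depths_m = [0.10, 0.25, 0.60, 1.20]
    return ''.join(['{:8.2f}'.format(d) for d in depths_m])
    # -> '    0.10    0.25    0.60    1.20'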
def csv_to_EPIC(df):
try:
df.groupby('mukey').apply(write_epic_soil_file)
except Exception,e:
logging.info(str(e))
# Output ieSlList.dat
epic_SlList_file = open(constants.out_dir+os.sep+constants.SLLIST, 'w')
idx = 1
for filename in glob.iglob(os.path.join(constants.t_soil_dir, '*.sol')):
epic_SlList_file.write(('%5s "soils//%s"\n')%(idx,os.path.basename(filename)))
idx += 1
epic_SlList_file.close()
if __name__ == '__main__':
df = SSURGO_to_csv()
csv_to_EPIC(df)
#def uniq_vals(group):
# try:
# return group[group['cokey'] == mode(np.array(group.cokey))[0][0]]
# except Exception, e:
# logger.info(e)
#def wavg(val_col_name, wt_col_name):
# def inner(group):
# return (group[val_col_name] * group[wt_col_name]).sum() / group[wt_col_name].sum()
# inner.__name__ = val_col_name
# return inner
#def wt_mean(group):
# # custom function for calculating a weighted mean
# # values passed in should be vectors of equal length
# g = group.groupby('layer_id')
# for key,val in epic_soil_vars.iteritems():
# group[val] = group[val] / g[val].transform('sum') * group['compct_r']
# return group
#def average_mukey_soil_vars(group):
# return group.mean(numeric_only=True)
#df4 = pd.DataFrame()
#df5 = pd.DataFrame()
#logger.info('Compute weighted means')
#for key,val in epic_soil_vars.iteritems():
# print val
# df4[val] = df3.groupby(['mukey','layer_id']).apply(wavg(val, 'comppct_r'))
#cols = [col for col in df4.columns if col not in ['mukey', 'layer_id']]
#tmp_df4 = df4[cols]
#df3.reset_index(inplace=True)
#df4.reset_index(inplace=True)
#df5 = df3[df3.columns.difference(tmp_df4.columns)]
#df6 = df5.groupby('mukey').apply(uniq_vals)
#df7 = df4.merge(df6,on=['mukey','layer_id'])
#df3.to_csv(out_dir+'SSURGO3.csv')
#df4.to_csv(out_dir+'SSURGO4.csv')
#df5.to_csv(out_dir+'SSURGO5.csv')
#df6.to_csv(out_dir+'SSURGO6.csv')
#df7.to_csv(out_dir+'SSURGO7.csv')
#logger.info('Done!')
#pdb.set_trace()
#logger.info('Done!')
|
ritviksahajpal/EPIC
|
SSURGO/SSURGO_to_csv.py
|
Python
|
mit
| 12,793
|
[
"NetCDF"
] |
28bdffd09c32d756fc6325bb8b271b1848120f429deb59e012a60030e0d13751
|
#input essential info:
#1.server info
#2.user name + user password
#3.initial environment
from urlparse import urlparse
from splinter import Browser
import threading,json,random,re,time,getpass
#constant
arrFiled = []
i = 0
while i < 18:
i = i + 1
arrFiled.append("http://ts3.travian.com/build.php?id=" + str(i))
userInfo = []
#function summary
def commonStrip(var):
var = var.encode()
p = re.compile("\d+,\d+?")
for com in p.finditer(var):
mm = com.group()
var = var.replace(mm, mm.replace(",", ""))
var = int(var)
return var
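# Quick illustrative check (not part of the bot's flow): commonStrip removes the
# thousands separators shown in Travian's resource counters before casting to int.
assert commonStrip(u"12,345") == 12345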
def loop(func1, func2, minloop, maxloop):
frequency = random.uniform(minloop, maxloop)
print "\033[34;1m" + "Attention: after ", frequency, " seconds, browser will refresh page." + "\033[0m"
print "\033[35;1m" + time.strftime('%Y-%m-%d %A %X %Z',time.localtime(time.time())) + "\033[0m"
print "\033[36;1m" + "reload page, continue...... \n" + "\033[0m"
func1()
func2()
time.sleep(frequency)
loop(func1, func2, minloop, maxloop)
#menu driven interface
def getChoice():
print "\033[1;32;41;1m" + "\nWelcome to MAD MAX World" + "\n(I)nput your account + password" + "\n(S)tart new game" + "\n(U)pgrade your field" + "\n(B)oost your soldier" + "\n(Q)uit" + "\033[0m"
choose = raw_input(">>> ")
choice = choose.lower()
return choice
def info():
global userInfo
print "\033[35;1m" + "Please input your account: " + "\033[0m"
accountName = raw_input()
print "\033[35;1m" + "Please input your password: " + "\033[0m"
accountPassword = getpass.getpass()
print "\033[35;1m" + "Please input your server number: " + "\033[0m"
accoutServerNum = raw_input()
userInfo.append('firefox')
userInfo.append(accoutServerNum)
userInfo.append(accountName)
userInfo.append(accountPassword)
print "\033[1;32;41;1m" + "Have collected your info, please choose what to do: " + "\033[0m"
def openBrowser():
global user
user = init(userInfo[0], userInfo[1], userInfo[2], userInfo[3])
print "\033[36;1m" + "We will start game for you" + "\033[0m"
def boost():
global boostSoldier,user
print "\033[36;1m" + "Which solider you want to boost: ('legionnaire' or 'Praetorian')" + "\033[0m"
soliderName = raw_input()
user = init(userInfo[0], userInfo[1], userInfo[2], userInfo[3])
user.establish()
    boostSoldier = boostSoldier(user.browser, soldierName)
loop(boostSoldier.reloadPage, boostSoldier.boost, 15, 25)
def upgrade():
global upgradeField,user
print "\033[36;1m" + "You want to upgrade your field? " + "\033[0m"
user = init(userInfo[0], userInfo[1], userInfo[2], userInfo[3])
user.establish()
upgradeField = upgradeField(user.browser)
loop(upgradeField.reloadPage, upgradeField.upgrade, 60, 80)
#main class
class init:
loginUserCounter = 0
def __init__(self, browserType, serverNum, username, password):
self.browserType = browserType
self.serverNum = serverNum
self.username = username
self.password = password
init.browser = Browser(browserType)
init.loginUserCounter += 1
def establish(self):
url = 'http://ts' + str(self.serverNum) + '.travian.com/'
#open browser and into game
init.browser.visit(url)
#fill username and password
init.browser.fill('name',self.username)
init.browser.fill('password',self.password)
btnLogin = init.browser.find_by_name('s1')
btnLogin.click()
def destory(self):
window = init.browser.windows[0]
if window.title == 'Travian com3':
window.close()
else:
window = window.next
class boostSoldier:
trigger = 1
    #soldierType is used to describe how many resources to use
soldierType = {
'legionnaire' : [120, 100, 150, 30],
'Praetorian' : [100, 130, 160, 70],
'Imperian' : [150, 160, 210, 80]
}
def __init__(self, browser, chooseType):
self.browser = browser
self.chooseType = chooseType
boostSoldier.Type = boostSoldier.soldierType[chooseType]
def reloadPage(self):
if boostSoldier.trigger == 1:
self.browser.reload()
else:
print "Boost process has been stopped!"
def boost(self):
tempArray = []
arrName = ['Lumber','Clay','Iron','Crop']
i = 0
lumber = self.browser.find_by_id('l1').value
clay = self.browser.find_by_id('l2').value
iron = self.browser.find_by_id('l3').value
crop = self.browser.find_by_id('l4').value
#strip and prepare all data
lumber = commonStrip(lumber)
clay = commonStrip(clay)
iron = commonStrip(iron)
crop = commonStrip(crop)
tempArray.append(lumber)
tempArray.append(clay)
tempArray.append(iron)
tempArray.append(crop)
#output all essential data
while i < 4:
print "Current " + arrName[i] + " is " + str(tempArray[i])
i = i + 1
if tempArray[0] > boostSoldier.Type[0] and tempArray[1] > boostSoldier.Type[1] and tempArray[2] > boostSoldier.Type[2] and tempArray[3] > boostSoldier.Type[3]:
print "\033[31;1m" + "Good, we have enough resources to boost more soilders \n" + "\033[0m"
o = urlparse(self.browser.url )
boostUrl = "http://" + o.netloc + "/build.php?id=32"
self.browser.visit(boostUrl)
def soldierChoose(x):
switcher = {
'legionnaire' : 't1',
'Praetorian' : 't2',
'Imperian' : 't3'
}
return switcher.get(x, 'none')
self.browser.fill(soldierChoose(self.chooseType), '1')
soldierBtn = self.browser.find_by_id('s1')
soldierBtn.click()
else:
print "\033[33;1m" + "Sorry, we do not have enough resources, will try after reload \n" + "\033[0m"
def stop():
boostSoldier.trigger = 0
class upgradeField:
position = 0
def __init__(self, browser):
self.browser = browser
def reloadPage(self):
self.browser.reload()
def upgrade(self):
p = upgradeField.position % 18
upgradeField.position += 1
print "\033[41;1m" + arrFiled[p] + "\033[0m"
urlBuild = arrFiled[p]
self.browser.visit(urlBuild)
buildBtn = self.browser.find_by_css('.green .build')
if buildBtn:
buildBtn.click()
print "\033[31;1m" + "Push build request to queue" + "\033[0m"
else:
print "\033[31;1m" + "Still not ready to build" + "\033[0m"
# TODO:
# def stop():
#run
choice = getChoice()
while choice != "q":
if choice == "i":
info()
elif choice == "s":
openBrowser()
elif choice == "u":
upgrade()
elif choice == "b":
boost()
else:
print("Invalid choice, please choose again")
print("\n")
choice = getChoice()
|
laboratoryyingong/TravianPlugin
|
TravianPlugin.py
|
Python
|
mit
| 7,256
|
[
"VisIt"
] |
a7176854393897e19380ead16c9454871b4a76e8750d70e95481d840ce4eb099
|
"""Base classes for parameters of algorithms with biomod functionality"""
from zope.interface import provider
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
from zope.schema.interfaces import IVocabularyFactory
brt_var_monotone_vocab = SimpleVocabulary([
SimpleTerm(-1, '-1', u'-1'),
SimpleTerm(1, '+1', u'+1'),
])
@provider(IVocabularyFactory)
def brt_var_monotone_vocab_factory(context):
return brt_var_monotone_vocab
brt_family_vocab = SimpleVocabulary([
SimpleTerm('bernoulli', 'bernoulli', 'bernoulli (binomial)'),
SimpleTerm('poisson', 'poisson', 'poisson'),
SimpleTerm('laplace', 'laplace', 'laplace'),
SimpleTerm('gaussian', 'gaussian', 'gaussian'),
])
@provider(IVocabularyFactory)
def brt_family_vocab_factory(context):
return brt_family_vocab
lm_na_action_vocab = SimpleVocabulary([
SimpleTerm('na.fail', 'na.fail', 'na.fail'),
SimpleTerm('na.omit', 'na.omit', 'na.omit'),
SimpleTerm('na.exclude', 'na.exclude', 'na.exclude'),
SimpleTerm(None, 'NULL', 'NULL')
])
@provider(IVocabularyFactory)
def lm_na_action_vocab_factory(context):
return lm_na_action_vocab
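# Minimal usage sketch (illustrative only): a term can be looked up by value
# with getTerm or by its token with getTermByToken; both return the SimpleTerm
# defined above.
def _lookup_demo():
    term = brt_family_vocab.getTerm('gaussian')
    return term.token, term.title  # ('gaussian', 'gaussian')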
|
chuijbers/org.bccvl.compute
|
src/org/bccvl/compute/vocabularies.py
|
Python
|
gpl-2.0
| 1,153
|
[
"Gaussian"
] |
a8d51a65cedcea3273cd68afa8dc64ff09edb85deb1713921ce1896017a225f3
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from abc import ABCMeta, abstractmethod
import six
@six.add_metaclass(ABCMeta)
class IdentifierCollection(object):
"""
A collection of all identifiers associated with a corresponding Subject.
An *identifier* in this context is an identifying attribute, such as a
username or user id or social security number or anything else considered
an 'identifying' attribute for a Subject.
An IdentifierCollection organizes its internal identifiers based on the
Realm where they came from when the Subject was first created. To obtain
the identifiers(s) for a specific source (realm), see the from_source method.
You can also see which realms contributed to this collection via the
source_names property.
"""
@property
@abstractmethod
def primary_identifier(self):
"""
Returns the primary identifier used application-wide to uniquely identify
the owning account/Subject.
The value is usually always a uniquely identifying attribute specific to
the data source that retrieved the account data. Some examples:
- a UUID
- a long integer value such as a surrogate primary key in a relational database
- an LDAP UUID or static DN
- a String username unique across all user accounts
Multi-Realm Applications
-------------------------
In a single-Realm application, typically there is only ever one unique
principal to retain and that is the value returned from this method.
However, in a multi-Realm application, where the IdentifierCollection
might retain identifiers across more than one realm, the value returned
from this method should be the single identifier that uniquely identifies
the subject for the entire application.
That value is of course application specific, but most applications will
typically choose one of the primary identifiers from one of the Realms.
Yosai's default implementations of this interface make this assumption
by usually simply returning the next iterated upon identifier
obtained from the first consulted/configured Realm during the
authentication attempt. This means in a multi-Realm application, Realm
        configuration order matters if you want to retain this default heuristic.
If this heuristic is not sufficient, most Shiro end-users will need to
implement a custom AuthenticationStrategy. An AuthenticationStrategy
has exact control over the IdentifierCollection returned at the end of
an authentication attempt via the AuthenticationStrategy
implementation.
:returns: the primary identifier used to uniquely identify the owning
account/Subject
"""
pass
@abstractmethod
def by_type(self, identifier_type):
"""
this method's value is controversial in nature in Shiro as it obtains
identifiers by type
"""
pass
@abstractmethod
def from_source(self, realm_name):
"""
obtain the identifier for a particular source (realm)
"""
pass
@property
@abstractmethod
def source_names(self):
"""
obtain a list of sources (realms) that identifiers have been obtained
from
"""
pass
@property
@abstractmethod
def is_empty(self):
"""
confirms whether the identifier collection is empty
"""
pass
def __eq__(self, other):
if self is other:
return True
return (isinstance(other, self.__class__) and
self.__dict__ == other.__dict__)
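# A minimal dict-backed sketch (not part of Yosai proper) of the
# IdentifierCollection contract above, organizing identifiers by the realm
# (source) they came from.  It is purely illustrative.
class _SimpleIdentifierCollection(IdentifierCollection):

    def __init__(self, source_name, identifier):
        self._identifiers = {source_name: identifier}

    @property
    def primary_identifier(self):
        # first identifier from the first consulted realm, per the heuristic
        # described in the docstring above
        return next(iter(self._identifiers.values()))

    def by_type(self, identifier_type):
        return [i for i in self._identifiers.values()
                if isinstance(i, identifier_type)]

    def from_source(self, realm_name):
        return self._identifiers.get(realm_name)

    @property
    def source_names(self):
        return list(self._identifiers.keys())

    @property
    def is_empty(self):
        return not self._identifiers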
class MutableIdentifierCollection(IdentifierCollection):
@abstractmethod
def add(self, source_name, identifier):
"""
:type identifier: string
:type source_name: string
"""
pass
@abstractmethod
def add_collection(self, identifier_collection):
"""
:type identifier_collection: subject_abcs.IdentifierCollection
"""
pass
@abstractmethod
def clear(self):
pass
class IdentifierMap(IdentifierCollection):
@abstractmethod
def get_realm_identifier(self, realm_name):
pass
@abstractmethod
def set_realm_identifier(self, realm_name, identifier):
pass
@abstractmethod
def set_realm_identifier(self, realm_name, identifier_name, identifier):
pass
@abstractmethod
def get_realm_identifier(self, realm_name, realm_identifier):
pass
@abstractmethod
def remove_realm_identifier(self, realm_name, identifier_name):
pass
@six.add_metaclass(ABCMeta)
class SubjectContext(object):
"""
A SubjectContext is a 'bucket' of data presented to a SecurityManager
that interprets data used to construct Subject instances. It is essentially
a Map of data with a few additional methods for easy retrieval of objects
commonly used to construct Subject instances.
The map can contain anything additional that might be needed by the
SecurityManager or SubjectFactory implementation to construct Subject
instances.
Data Resolution
----------------
The SubjectContext interface allows for heuristic resolution of data
used to construct a subject instance. That is, if an attribute has not been
explicitly assigned, the *resolve methods use heuristics to obtain data
using methods other than direct attribute access.
For example, if one references the identifiers property and no identifiers
are returned, perhaps the identifiers exist in a session or another
attribute in the context. The resolve_identifiers method will know
how to resolve the identifiers based on heuristics. If the *resolve methods
return None, then the data could not be achieved through heuristics and must
be considered unavailable in the context.
The general idea is that the normal direct attribute access can be called to
determine whether the value was explicitly set. The *resolve methods are
used when actually constructing a Subject instance to ensure the most
specific/accurate data is used.
USAGE
--------------
Most Yosai end-users will never use a SubjectContext instance directly and
instead will use a SubjectBuilder (which internally uses a SubjectContext)
to build Subject instances.
"""
@abstractmethod
def resolve_security_manager(self):
"""
Resolves the SecurityManager instance to be used to back the constructed
Subject instance (typically used to support DelegatingSubject implementations)
"""
pass
@abstractmethod
def resolve_identifiers(self, session):
pass
@abstractmethod
def resolve_session(self):
pass
@abstractmethod
def resolve_authenticated(self, session):
pass
@abstractmethod
def resolve_host(self, session):
pass
def __eq__(self, other):
if self is other:
return True
return (isinstance(other, self.__class__) and
self.__dict__ == other.__dict__)
@six.add_metaclass(ABCMeta)
class Subject(object):
"""
A Subject represents state and security operations for a *single*
application user. These operations include authentication (login/logout),
authorization (access control), and session access. A subject is Yosai's
primary mechanism for single-user security functionality.
Acquiring a Subject
----------------------
To acquire the currently-executing Subject, application developers will
almost always use Yosai:
Yosai.get_subject()
Almost all security operations should be performed with the Subject returned
from this method.
Permission methods
--------------------
Note that there are many Permission methods in this interface that
accept a list of either String arguments or authz_abcs.Permission instances.
The underlying Authorization subsystem implementations will usually simply
convert these String values to Permission instances and then call the
corresponding method. (Yosai's default implementations do String-to-Permission
    conversion.)
remembered attribute:
---------------------
Returns True if this Subject has an identity (it is not anonymous) and
the identity (aka identifiers}) is remembered from a successful
authentication during a previous session.
Although the underlying implementation determines exactly how this
method functions, most implementations have this method act as the
logical equivalent to this code:
    - subject.identifiers is not None and not subject.authenticated
Note as indicated by the above code example, if a Subject is remembered,
it is *NOT* considered authenticated. A check against authenticated
is a more strict check than that reflected by this method. For example,
a check to see whether a subject can access financial information should
almost always depend on subject.authenticated rather, than this method,
to *guarantee* a verified identity.
Once the subject is authenticated, it is no longer considered only
remembered because its identity would have been verified during the
current session.
Remembered vs Authenticated
-----------------------------
Authentication is the process of *proving* a subject is who it claims to
be. When a user is only remembered, the remembered identity gives the
system an idea who that user probably is, but in reality, the system
has no way of absolutely *guaranteeing* whether the remembered Subject
represents the user currently using the application.
So, although many parts of the application can still perform user-specific
logic based on the remembered identifiers, such as customized views,
the application should never perform highly-sensitive operations until
the user has legitimately verified its identity by executing a successful
authentication attempt.
We see this paradigm all over the web, and we will use
<a href="http://www.amazon.com">Amazon.com</a> as an example:
When you visit Amazon.com and perform a login and ask it to 'remember me',
Amazon will set a cookie with your identity. If you don't log out and
your session expires, but you come back the next day, Amazon still knows
who you *probably* are and so you see all of your book and movie
recommendations and similar user-specific features since these are based
on your (remembered) user id (identifiers).
However, if you try to do something sensitive, such as access your
account's billing data, Amazon forces you to perform an actual log-in,
requiring your username and password.
Amazon does this because although it assumes your identity from
'remember me', it recognized that you were not actually authenticated.
The only way to really guarantee you are who you say you are, and
therefore allow you access to sensitive account data, is to require you
to perform an actual successful authentication. You can check this
guarantee via the subject.authenticated method and not via this method.
"""
@property
@abstractmethod
def identifiers(self):
"""
Returns this Subject's application-wide uniquely identifying principal,
or None if this Subject is anonymous because it doesn't yet have any
associated account data (for example, if they haven't logged in).
The term 'principal' is just a fancy security term for any identifying
attribute(s) of an application user, such as a username, or user id, or
public key, or anything else you might use in your application to
identify a user. Yosai replaces the term 'principal' with 'identifier'
in recognition of terminology confusion that Shiro faces using 'principal'.
Uniqueness
-----------
Although given names and family names (first/last) are technically
considered identifiers as well, Yosai expects the object returned from
this method to be an identifying attribute unique across your entire
application.
This implies that attributes like given names and family names are usually
poor candidates as return values since they are rarely guaranteed to be
unique. Items often used for this value:
- A long-int RDBMS surrogate primary key
- An application-unique username
- A UUID
- An LDAP Unique ID
- any other similar, suitable, and unique mechanism valuable to your
application
        Most implementations will simply return identifiers.primary_identifier.
"""
pass
@identifiers.setter
@abstractmethod
def identifiers(self, v):
"""
Returns this Subject's principals (identifying attributes) in the form
of an IdentifierCollection or None if this Subject is anonymous because
it doesn't yet have any associated account data (for example, if they
haven't logged in).
The word 'principals' is nothing more than a fancy security term for
identifying attributes associated with a Subject, aka, application user.
For example, user id, a surname (family/last name), given (first) name,
social security number, nickname, username, etc, are all examples of a
principal. Yosai replaces the term 'principal' with 'identifier'
in recognition of terminology confusion that Shiro faces using
'principal'.
"""
pass
@abstractmethod
def is_permitted(self, permission_s):
"""
Determines whether any Permission(s) associated with the subject
implies the requested Permission(s) provided.
:param permission_s: a collection of 1..N permissions, all of the same type
:type permission_s: List of authz_abcs.Permission object(s) or String(s)
:returns: a set of tuple(s), each containing the Permission
requested and a Boolean indicating whether permission is
granted
- the tuple format is: (Permission, Boolean)
"""
pass
@abstractmethod
def is_permitted_collective(self, permission_s, logical_operator):
"""
This method determines whether the requested Permission(s) are
collectively granted authorization. The Permission(s) associated with
the subject are evaluated to determine whether authorization is implied
for each Permission requested. Results are collectively evaluated using
the logical operation provided: either ANY or ALL.
If operator=ANY: returns True if any requested permission is implied permission
If operator=ALL: returns True if all requested permissions are implied permission
Else returns False
:param permission_s: a collection of 1..N permissions, all of the same type
:type permission_s: List of authz_abcs.Permission object(s) or String(s)
:param logical_operator: any or all
:type logical_operator: function (stdlib)
:rtype: bool
"""
pass
@abstractmethod
def check_permission(self, permission_s, logical_operator):
"""
This method determines whether the requested Permission(s) are
collectively granted authorization. The Permission(s) associated with
the subject are evaluated to determine whether authorization is implied
for each Permission requested. Results are collectively evaluated using
the logical operation provided: either ANY or ALL.
This method is similar to is_permitted_collective except that it raises
an AuthorizationException if collectively False else does not return any
value.
:param permission_s: a collection of 1..N permissions, all of the same type
:type permission_s: List of authz_abcs.Permission object(s) or String(s)
:param logical_operator: any or all
:type logical_operator: function (stdlib)
:raises AuthorizationException: if the user does not have sufficient
permission
"""
pass
@abstractmethod
def has_role(self, role_s):
"""
Determines whether a Subject is a member of the Role(s) requested
:param role_s: 1..N role identifiers (strings)
:type role_s: Set of Strings
:returns: a set of tuple(s), each containing the Role identifier
requested and a Boolean indicating whether the subject is
a member of that Role
- the tuple format is: (role, Boolean)
"""
pass
@abstractmethod
def has_role_collective(self, role_s, logical_operator):
"""
This method determines whether the Subject's role membership
collectively grants authorization for the roles requested. The
Role(s) associated with the subject are evaluated to determine
whether the roles requested are sufficiently addressed by those that
the Subject is a member of. Results are collectively evaluated using
the logical operation provided: either ANY or ALL.
If operator=ANY, returns True if any requested role membership is
satisfied
If operator=ALL: returns True if all of the requested permissions are
implied permission
Else returns False
:param role_s: 1..N role identifiers (strings)
:type role_s: Set of Strings
:param logical_operator: any or all
:type logical_operator: function (stdlib)
:rtype: bool
"""
pass
@abstractmethod
def check_role(self, role_s, logical_operator):
"""
This method determines whether the Subject's role membership
collectively grants authorization for the roles requested. The
Role(s) associated with the subject are evaluated to determine
whether the roles requested are sufficiently addressed by those that
the Subject is a member of. Results are collectively evaluated using
the logical operation provided: either ANY or ALL.
This method is similar to has_role_collective except that it raises
an AuthorizationException if collectively False else does not return any
:param role_s: 1..N role identifiers (strings)
:type role_s: Set of Strings
:param logical_operator: any or all
:type logical_operator: function (stdlib)
:raises AuthorizationException: if the user does not have sufficient
role membership
"""
pass
@abstractmethod
def login(self, authc_token):
"""
Performs a login attempt for this Subject/user.
If unsuccessful, a subclass of AuthenticationException is raised,
identifying why the attempt failed.
If successful, the Account data associated with the submitted
identifiers/credentials will be associated with this Subject and the
method will return quietly.
Upon returning quietly, this Subject instance can be considered
authenticated and its identifiers attribute will be non-None and
its authenticated property will be True.
:param authc_token: the token encapsulating the subject's identifiers
and credentials to be passed to the Authentication
subsystem for verification
:type authc_token: authc_abcs.AuthenticationToken
:raises AuthenticationException: if the authentication attempt fails
"""
pass
@abstractmethod
def get_session(self, create=None):
"""
Returns the application Session associated with this Subject based on
the following criteria:
- If there is already an existing Session associated with this
Subject, it is returned and the create argument is ignored.
- If no Session exists and create is True, a new Session is created,
associated with this Subject and then returned.
- If no Session exists and create is False, None is returned.
:returns: the application Session associated with this Subject
"""
pass
@abstractmethod
def logout(self):
"""
Logs out this Subject and invalidates and/or removes any associated
entities, such as a Session and authorization data. After this method
is called, the Subject is considered 'anonymous' and may continue to be
used for another log-in, if desired.
Web Environment Warning
-------------------------
Calling this method in web environments will usually remove any
associated session cookie as part of session invalidation. Because
cookies are part of the HTTP header, and headers can only be set before
the response body (html, image, etc) is sent, this method in web
environments must be called before *any* content is rendered.
The typical approach most applications use in this scenario is to redirect
the user to a different location (e.g. home page) immediately after
calling this method. This is an effect of the HTTP protocol itself and
not a reflection of Yosai's implementation.
Non-HTTP environments may of course use a logged-out subject for login
again if desired.
"""
pass
@abstractmethod
def run_as(self, identifiers):
"""
Allows this subject to 'run as' or 'assume' another identity indefinitely.
This method can only be called when the Subject instance already has an
identity (i.e. it is remembered from a previous log-in or it has
authenticated in its current session).
Some notes about run_as:
- You can determine whether a Subject is 'running as' another identity
by checking the run_as property.
- If running as another identity, you can determine what the previous
identity, the identity just prior to running-as, is by calling the
get_previous_identifiers method.
- When you want a Subject to stop running as another identity, you can
return to its previous identity (the identity just prior to running-as)
by calling the release_run_as method.
:param identifiers: the identity to 'run as', aka the identity to
*assume* indefinitely
:type identifiers: subject_abcs.IdentifierCollection
"""
pass
@abstractmethod
def is_run_as(self):
"""
Returns True if this Subject is 'running as' another identity other than
its original one or False otherwise (normal Subject state). See the
run_as method for more information.
:returns: True if this Subject is 'running as' another identity other
than its original one or False otherwise (normal Subject state)
:rtype: bool
"""
pass
@abstractmethod
def get_previous_identifiers(self):
"""
Returns the previous 'pre run as' identity of this Subject before
assuming the current run_as identity, or None if this Subject is not
operating under an assumed identity (normal state). See the run_as
method for more information.
:returns: the previous 'pre run as' identity of this Subject before
assuming the current run_as identity, or None if this Subject
is not operating under an assumed identity (normal state)
"""
pass
@abstractmethod
def release_run_as(self):
"""
This method releases the current 'run as' (assumed) identity and reverts
to the previous 'pre run as' identity that existed before run_as was called.
This method returns 'run as' (assumed) identity being released or None
if this Subject is not operating under an assumed identity.
:returns: the 'run as' (assumed) identity being released or None if this
Subject is not operating under an assumed identity
"""
pass
def __eq__(self, other):
if self is other:
return True
return (isinstance(other, self.__class__) and
self.__dict__ == other.__dict__)
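# Illustration of the any/all contract documented above (not part of the ABCs):
# is_permitted/has_role return (item, granted) tuples, and the *_collective
# variants reduce the boolean halves with the stdlib any/all callables passed in
# as logical_operator.  The sample results below are made up.
def _collective_demo(results=(('wiki:read', True), ('wiki:write', False))):
    return (any(granted for _, granted in results),   # True
            all(granted for _, granted in results))   # False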
# moved from /mgt:
@six.add_metaclass(ABCMeta)
class SubjectStore(object):
"""
A SubjectStore is responsible for persisting a Subject instance's internal
state such that the Subject instance can be recreated at a later time if
necessary.
Shiro's default SecurityManager implementations typically use a SubjectStore
    in conjunction with a SubjectFactory: after the SubjectFactory creates a
Subject instance, the SubjectStore is used to persist that subject's state
such that it can be accessed later if necessary.
Usage
--------
Note that this component is used by SecurityManager implementations to
manage Subject state persistence. It does *not* make Subject instances
accessible to the application (e.g. via yosai.subject).
"""
@abstractmethod
def save(self, subject):
"""
Persists the specified Subject's state for later access. If there is
        no existing state persisted, this method persists it if possible (i.e. a
create operation). If there is existing state for the specified
Subject, this method updates the existing state to reflect the
current state (i.e. an update operation).
:param subject: the Subject instance for which its state will be
created or updated
:returns: the Subject instance to use after persistence is complete
- this can be the same as the method argument if the
underlying implementation does not need to make any Subject
changes
"""
pass
@abstractmethod
def delete(self, subject):
"""
Removes any persisted state for the specified Subject instance.
This is a delete operation such that the Subject's state will not be
accessible at a later time.
:param subject: the Subject instance for which any persistent state
should be deleted
"""
pass
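# A minimal in-memory sketch (not part of Yosai itself) of the save/delete
# contract described above; a real SubjectStore would persist to a Session or
# cache rather than a plain dict, and would key on the subject's identifiers.
class _DictSubjectStore(SubjectStore):

    def __init__(self):
        self._subjects = {}

    def save(self, subject):
        # create-or-update, returning the subject to use afterwards
        self._subjects[id(subject)] = subject
        return subject

    def delete(self, subject):
        self._subjects.pop(id(subject), None)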
# moved from /mgt:
@six.add_metaclass(ABCMeta)
class SubjectFactory(object):
"""
A SubjectFactory is responsible for constructing Subject instances as
needed
"""
def create_subject(self, context):
"""
Creates a new Subject instance reflecting the state of the specified
        contextual data. The data would be anything required to
construct a Subject instance and its contents can vary based on
environment.
Any data supported by Shiro core will be accessible by one of the
SubjectContext(s) accessor properties or methods. All other data is
available as map attributes.
:param context: the contextual data to be used by the implementation
to construct an appropriate Subject instance
:returns: a Subject instance created based on the specified context
"""
pass
|
jellybean4/yosaipy2
|
yosaipy2/core/subject/abcs.py
|
Python
|
apache-2.0
| 28,464
|
[
"VisIt"
] |
e0e640f80c4aed37bdb1065b92de4c4fa2c3d89eef36c1bf6ed9be30130a2679
|
#!/usr/bin/env ccp4-python
"""Useful manipulations on PDB files"""
# Python imports
from collections import defaultdict
import copy
import logging
import os
import re
import sys
import unittest
import iotbx.file_reader
import iotbx.pdb
from ample.util import ample_util, pdb_model, residue_map, sequence_util
logger = logging.getLogger(__name__)
def add_missing_single_chain_ids(hierarchies, chain_id='A'):
"""Add any missing chain ids
Use the first chain.id as the template or the supplied chain_id if
none is present
"""
if not isinstance(hierarchies, list):
hierarchies = [hierarchies]
# Determine the chain_id for all non-named chains
chain = hierarchies[0].models()[0].only_chain()
if isinstance(chain.id, str) and len(chain.id) > 0:
chain_id = chain.id
# Ensure all chains have an id and return whether any were updated
updated = False
for h in hierarchies:
for model in h.models():
chain = model.only_chain()
if chain_id_is_blank(chain):
chain.id = chain_id
updated = True
return updated
def backbone(inpath=None, outpath=None):
"""Only output backbone atoms.
"""
# pdbcur segfaults with long pathnames
inpath = os.path.relpath(inpath)
outpath = os.path.relpath(outpath)
logfile = outpath + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(inpath, outpath).split()
stdin = 'lvatom "N,CA,C,O,CB[N,C,O]"'
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode == 0:
os.unlink(logfile)
else:
raise RuntimeError("Error stripping PDB to backbone atoms. See log:{0}".format(logfile))
def calpha_only(inpdb, outpdb):
"""Strip PDB to c-alphas only"""
logfile = outpdb + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(inpdb, outpdb).split()
stdin = 'lvatom "CA[C]:*"'
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode == 0:
os.unlink(logfile)
else:
raise RuntimeError("Error stripping PDB to c-alpha atoms")
def chain_id_is_blank(chain):
return isinstance(chain.id, str) and len(chain.id.strip()) == 0
def extract_chain(inpdb, outpdb, chainID=None, newChainID=None, cAlphaOnly=False, renumber=True):
"""Extract chainID from inpdb and renumner.
If cAlphaOnly is set, strip down to c-alpha atoms
"""
logfile = outpdb + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(inpdb, outpdb).split()
stdin = "lvchain {0}\n".format(chainID)
if newChainID:
stdin += "renchain {0} {1}\n".format(chainID, newChainID)
if cAlphaOnly:
stdin += 'lvatom "CA[C]:*"\n'
if renumber:
stdin += "sernum\n"
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode == 0:
os.unlink(logfile)
else:
raise RuntimeError("Error extracting chain {0}".format(chainID))
def extract_model(inpdb, outpdb, modelID):
"""Extract modelID from inpdb into outpdb"""
assert modelID > 0
logfile = outpdb + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(inpdb, outpdb).split()
stdin = "lvmodel /{0}\n".format(modelID)
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode != 0:
raise RuntimeError("Problem extracting model with cmd: {0}".format)
os.unlink(logfile)
def extract_header_pdb_code(pdb_input):
for line in pdb_input.title_section():
if line.startswith("HEADER ") and len(line) >= 65:
return line[62:66]
return None
def extract_header_title(pdb_input):
for line in pdb_input.title_section():
if line.startswith('TITLE'):
return line[10:-1].strip()
return None
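# Illustrative sketch only: "model.pdb" is a placeholder path. Shows that the header
# helpers take an iotbx pdb_input object rather than a file path.
def _example_extract_header():
    pdb_inp = iotbx.pdb.pdb_input(file_name="model.pdb")
    return extract_header_pdb_code(pdb_inp), extract_header_title(pdb_inp)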
def keep_matching(refpdb=None, targetpdb=None, outpdb=None, resSeqMap=None):
"""Only keep those atoms in targetpdb that are in refpdb and write the result to outpdb.
We also take care of renaming any chains.
"""
assert refpdb and targetpdb and outpdb and resSeqMap
tmp1 = ample_util.tmp_file_name() + ".pdb" # pdbcur insists names have a .pdb suffix
_keep_matching(refpdb, targetpdb, tmp1, resSeqMap=resSeqMap)
# now renumber with pdbcur
logfile = tmp1 + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(tmp1, outpdb).split()
stdint = """sernum
"""
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdint)
if retcode == 0:
# remove temporary files
os.unlink(tmp1)
os.unlink(logfile)
return retcode
def _keep_matching(refpdb=None, targetpdb=None, outpdb=None, resSeqMap=None):
"""Create a new pdb file that only contains that atoms in targetpdb that are
also in refpdb. It only considers ATOM lines and discards HETATM lines in the target.
Args:
refpdb: path to pdb that contains the minimal set of atoms we want to keep
targetpdb: path to the pdb that will be stripped of non-matching atoms
outpdb: output path for the stripped pdb
"""
assert refpdb and targetpdb and outpdb and resSeqMap
def _output_residue(refResidues, targetAtomList, resSeqMap, outfh):
"""Output a single residue only outputting matching atoms, shuffling the atom order and changing the resSeq num"""
# Get the matching list of atoms
targetResSeq = targetAtomList[0].resSeq
refResSeq = resSeqMap.ref2target(targetResSeq)
# Get the atomlist for the reference
for (rid, alist) in refResidues:
if rid == refResSeq:
refAtomList = alist
break
# Get ordered list of the ref atom names for this residue
rnames = [x.name for x in refAtomList]
if len(refAtomList) > len(targetAtomList):
raise RuntimeError(
"Cannot keep matching as refAtomList is > targetAtomList for residue {}\nRef: {}\nTrg: {}".format(
targetResSeq, rnames, [x.name for x in targetAtomList]
)
)
# Remove any not matching in the target
alist = []
for atom in targetAtomList:
if atom.name in rnames:
alist.append(atom)
# List now only contains matching atoms
targetAtomList = alist
# Now just have matching so output in the correct order
for refname in rnames:
for i, atom in enumerate(targetAtomList):
if atom.name == refname:
# Found the matching atom
# Change resSeq and write out
atom.resSeq = refResSeq
outfh.write(atom.toLine() + "\n")
# now delete both this atom and the line
targetAtomList.pop(i)
# jump out of inner loop
break
return
# Go through refpdb and find which refResidues are present
    refResidues = []  # ordered list of tuples - ( resSeq, [ list_of_atoms_for_that_residue ] )
    targetResSeq = []  # ordered list of the corresponding resSeq numbers in the target
last = None
chain = -1
for line in open(refpdb, 'r'):
if line.startswith("MODEL"):
raise RuntimeError("Multi-model file!")
if line.startswith("TER"):
break
if line.startswith("ATOM"):
a = pdb_model.PdbAtom(line)
# First atom/chain
if chain == -1:
chain = a.chainID
if a.chainID != chain:
raise RuntimeError("ENCOUNTERED ANOTHER CHAIN! {0}".format(line))
if a.resSeq != last:
last = a.resSeq
# Add the corresponding resSeq in the target
targetResSeq.append(resSeqMap.target2ref(a.resSeq))
refResidues.append((a.resSeq, [a]))
else:
refResidues[-1][1].append(a)
# Now read in target pdb and output everything bar the atoms in this file that
# don't match those in the refpdb
t = open(targetpdb, 'r')
out = open(outpdb, 'w')
chain = None # The chain we're reading
residue = None # the residue we're reading
targetAtomList = []
for line in t:
if line.startswith("MODEL"):
raise RuntimeError("Multi-model file!")
if line.startswith("ANISOU"):
raise RuntimeError("I cannot cope with ANISOU! {0}".format(line))
# Stop at TER
if line.startswith("TER"):
_output_residue(refResidues, targetAtomList, resSeqMap, out)
# we write out our own TER
out.write("TER\n")
continue
if line.startswith("ATOM"):
atom = pdb_model.PdbAtom(line)
# First atom/chain
            if chain is None:
chain = atom.chainID
if atom.chainID != chain:
raise RuntimeError("ENCOUNTERED ANOTHER CHAIN! {0}".format(line))
if atom.resSeq in targetResSeq:
# If this is the first one add the empty tuple and reset residue
if atom.resSeq != residue:
                    if residue is not None:  # Don't write anything out for the first atom
_output_residue(refResidues, targetAtomList, resSeqMap, out)
targetAtomList = []
residue = atom.resSeq
# If not first keep adding
targetAtomList.append(atom)
# We don't write these out as we write them with _output_residue
continue
else:
# discard this line as not a matching atom
continue
# For time being exclude all HETATM lines
elif line.startswith("HETATM"):
continue
# Endif line.startswith("ATOM")
# Output everything else
out.write(line)
# End reading loop
t.close()
out.close()
return
def get_info(inpath):
"""Read a PDB and extract as much information as possible into a PdbInfo object
"""
info = pdb_model.PdbInfo()
info.pdb = inpath
currentModel = None
currentChain = -1
modelAtoms = [] # list of models, each of which is a list of chains with the list of atoms
# Go through refpdb and find which ref_residues are present
f = open(inpath, 'r')
line = f.readline()
while line:
# First line of title
if line.startswith('HEADER'):
info.pdbCode = line[62:66].strip()
# First line of title
if line.startswith('TITLE') and not info.title:
info.title = line[10:-1].strip()
if line.startswith("REMARK"):
try:
numRemark = int(line[7:10])
except ValueError:
line = f.readline()
continue
# Resolution
if numRemark == 2:
line = f.readline()
if line.find("RESOLUTION") != -1:
try:
info.resolution = float(line[25:30])
except ValueError:
# RESOLUTION. NOT APPLICABLE.
info.resolution = -1
# Get solvent content
if numRemark == 280:
maxread = 5
# Clunky - read up to maxread lines to see if we can get the information we're after
# We assume the floats are at the end of the lines
for _ in range(maxread):
line = f.readline()
if line.find("SOLVENT CONTENT") != -1:
try:
info.solventContent = float(line.split()[-1])
except ValueError:
# Leave as None
pass
if line.find("MATTHEWS COEFFICIENT") != -1:
try:
info.matthewsCoefficient = float(line.split()[-1])
except ValueError:
# Leave as None
pass
# End REMARK
if line.startswith("CRYST1"):
try:
info.crystalInfo = pdb_model.CrystalInfo(line)
except ValueError as e:
logger.critical("ERROR READING CRYST1 LINE in file %s\":%s\"\n%s", inpath, line.rstrip(), e)
info.crystalInfo = None
if line.startswith("MODEL"):
if currentModel:
# Need to make sure that we have an id if only 1 chain and none given
if len(currentModel.chains) <= 1:
                    if currentModel.chains[0] is None:
currentModel.chains[0] = 'A'
info.models.append(currentModel)
# New/first model
currentModel = pdb_model.PdbModel()
# Get serial
currentModel.serial = int(line.split()[1])
currentChain = None
modelAtoms.append([])
# Count chains (could also check against the COMPND line if present?)
if line.startswith('ATOM'):
# Create atom object
atom = pdb_model.PdbAtom(line)
# Check for the first model
if not currentModel:
# This must be the first model and there should only be one
currentModel = pdb_model.PdbModel()
modelAtoms.append([])
if atom.chainID != currentChain:
currentChain = atom.chainID
currentModel.chains.append(currentChain)
modelAtoms[-1].append([])
modelAtoms[-1][-1].append(atom)
# Can ignore TER and ENDMDL for time being as we'll pick up changing chains anyway,
# and new models get picked up by the models line
line = f.readline()
# End while loop
# End of reading loop so add the last model to the list
info.models.append(currentModel)
f.close()
bbatoms = ['N', 'CA', 'C', 'O', 'CB']
# Now process the atoms
for modelIdx, model in enumerate(info.models):
chainList = modelAtoms[modelIdx]
for chainIdx, atomList in enumerate(chainList):
# Paranoid check
assert model.chains[chainIdx] == atomList[0].chainID
# Add list of atoms to model
model.atoms.append(atomList)
# Initialise new chain
currentResSeq = atomList[0].resSeq
currentResName = atomList[0].resName
model.resSeqs.append([])
model.sequences.append("")
model.caMask.append([])
model.bbMask.append([])
atomTypes = []
for i, atom in enumerate(atomList):
aname = atom.name.strip()
if atom.resSeq != currentResSeq and i == len(atomList) - 1:
# Edge case - last residue containing one atom
atomTypes = [aname]
else:
if aname not in atomTypes:
atomTypes.append(aname)
if atom.resSeq != currentResSeq or i == len(atomList) - 1:
# End of reading the atoms for a residue
model.resSeqs[chainIdx].append(currentResSeq)
model.sequences[chainIdx] += ample_util.three2one[currentResName]
if 'CA' not in atomTypes:
model.caMask[chainIdx].append(True)
else:
model.caMask[chainIdx].append(False)
missing = False
for bb in bbatoms:
if bb not in atomTypes:
missing = True
break
if missing:
model.bbMask[chainIdx].append(True)
else:
model.bbMask[chainIdx].append(False)
currentResSeq = atom.resSeq
currentResName = atom.resName
atomTypes = []
return info
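# Illustrative sketch only: "model.pdb" is a placeholder path. get_info() returns a
# pdb_model.PdbInfo object whose fields (pdbCode, resolution, models, ...) are populated above.
def _example_get_info():
    info = get_info("model.pdb")
    logger.info("pdb code: %s resolution: %s models: %d", info.pdbCode, info.resolution, len(info.models))
    return info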
def match_resseq(targetPdb=None, outPdb=None, resMap=None, sourcePdb=None):
assert sourcePdb or resMap
assert not (sourcePdb and resMap)
if not resMap:
resMap = residue_map.residueSequenceMap(targetPdb, sourcePdb)
chain = None # The chain we're reading
with open(targetPdb, 'r') as target, open(outPdb, 'w') as out:
for line in target:
if line.startswith("MODEL"):
raise RuntimeError("Multi-model file!")
if line.startswith("ANISOU"):
raise RuntimeError("I cannot cope with ANISOU! {0}".format(line))
# Stop at TER
if line.startswith("TER"):
pass
if line.startswith("ATOM"):
atom = pdb_model.PdbAtom(line)
# First atom/chain
                if chain is None:
chain = atom.chainID
if atom.chainID != chain:
pass
# Get the matching resSeq for the model
modelResSeq = resMap.ref2target(atom.resSeq)
if modelResSeq == atom.resSeq:
out.write(line)
else:
atom.resSeq = modelResSeq
out.write(atom.toLine() + "\n")
continue
out.write(line)
def merge_chains(pdbin, pdbout, chains=None):
"""Merge pdb chains.
If no chains argument is given merge all chains into the first chain, otherwise merge
all but the first chain in chains into the first chain in chains.
Parameters
----------
pdbin : file
Source pdb to merge chains from
pdbout : file
pdb output file for single chain pdb
chains : list
list of chain ids - if provided all chains in the list will be merged into the first.
Returns
-------
pdbout : file
pdb output file for single chain pdb
"""
hin = iotbx.pdb.pdb_input(file_name=pdbin).construct_hierarchy()
hout = _merge_chains(hin, chains=chains)
with open(pdbout, 'w') as f:
f.write("REMARK Original file:{}\n".format(pdbin))
f.write(hout.as_pdb_string(anisou=False))
return pdbout
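# Illustrative sketch only: placeholder paths and chain ids. Merges chains A and B of
# "dimer.pdb" into a single chain and writes the result to "merged.pdb".
def _example_merge_chains():
    return merge_chains("dimer.pdb", "merged.pdb", chains=['A', 'B'])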
def _merge_chains(hierarchy, chains=None):
"""Merge pdb chains in hierarchy.
Parameters
----------
hierarchy : cctbx_pdb_hierarchy
The original CCTBX PDB hierarchy
chains : list
list of chain ids - if provided all chains in the list will be merged into the first.
Returns
-------
hierarchy : cctbx_pdb_hierarchy
        New hierarchy
"""
# Make sure we can find the required chain ids
chain_ids = [chain.id for chain in hierarchy.models()[0].chains()]
if chains:
chains = copy.copy(
chains
) # Make sure we're not altering the given arg so we can be called multiple times in a loop
assert isinstance(chains, list) and len(chains) > 1, "Need list of more than one chain {}".format(chains)
root_chain_id = chains.pop(0)
if root_chain_id not in chain_ids:
raise RuntimeError("Cannot find root_chain_id {} in chain ids {}".format(root_chain_id, chain_ids))
if not set(chains).issubset(set(chain_ids)):
raise RuntimeError("Cannot find all chains {} in {}".format(chains, chain_ids))
else:
# append all chains to the first chain
root_chain_id = hierarchy.models()[0].chains()[0].id
root_chain_idx = chain_ids.index(root_chain_id)
root_chain = hierarchy.models()[0].chains()[root_chain_idx].detached_copy()
for i, chain in enumerate(hierarchy.models()[0].chains()):
if i == root_chain_idx:
continue
if chains and chain.id not in chains:
continue
if not chain.is_protein():
continue
for r in chain.residue_groups():
root_chain.append_residue_group(r.detached_copy())
new_model = iotbx.pdb.hierarchy.model()
new_model.append_chain(root_chain)
new_hierarchy = iotbx.pdb.hierarchy.root()
    new_hierarchy.append_model(new_model)
_renumber(new_hierarchy)
return new_hierarchy
def merge(pdb1=None, pdb2=None, pdbout=None):
"""Merge two pdb files into one"""
logfile = pdbout + ".log"
cmd = ['pdb_merge', 'xyzin1', pdb1, 'xyzin2', pdb2, 'xyzout', pdbout]
stdin = 'nomerge'
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode == 0:
os.unlink(logfile)
else:
raise RuntimeError("Error merging pdbs: {0} {1}".format(pdb1, pdb2))
def molecular_weight(pdbin):
logfile = "rwcontents.log"
_run_rwcontents(pdbin, logfile)
_, _, mw = _parse_rwcontents(logfile)
os.unlink(logfile)
return mw
def num_atoms_and_residues(pdbin, first=False):
""""Return number of atoms and residues in a pdb file.
If all is True, return all atoms and residues, else just for the first chain in the first model'
"""
# pdb_obj = iotbx.pdb.hierarchy.input(file_name=pdbin)
# model = pdb_obj.hierarchy.models()[0]
# return sum( [ len( chain.residues() ) for chain in model.chains() ] )
if not first:
logfile = "rwcontents.log"
_run_rwcontents(pdbin, logfile)
natoms, nresidues, _ = _parse_rwcontents(logfile)
os.unlink(logfile)
else:
pdb_obj = iotbx.pdb.hierarchy.input(file_name=pdbin)
model = pdb_obj.hierarchy.models()[0]
nresidues = len(model.chains()[0].residues())
natoms = len(model.chains()[0].atoms())
assert natoms > 0 and nresidues > 0
return (natoms, nresidues)
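# Illustrative sketch only: "model.pdb" is a placeholder path. molecular_weight() and
# the first=False branch of num_atoms_and_residues() shell out to CCP4 rwcontents.
def _example_counts():
    natoms, nresidues = num_atoms_and_residues("model.pdb", first=True)
    return natoms, nresidues, molecular_weight("model.pdb")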
def _only_equal_sizes(hierarchy):
"""If a hiearchy contains different size models, only keep models of the most numerous size"""
lengths = defaultdict(list)
lmax = 0
for i, model in enumerate(hierarchy.models()):
l = model.chains()[0].residue_groups_size()
lengths[l].append(i)
lmax = max(lmax, l)
if len(lengths) > 1:
# The pdbs were of different lengths
to_keep = lengths[lmax]
logger.debug('All models were not of the same length, only {0} will be kept.'.format(len(to_keep)))
# Delete any that are not of most numerous length
for i, model in enumerate(hierarchy.models()):
if i not in to_keep:
hierarchy.remove_model(model)
return hierarchy
def _parse_rwcontents(logfile):
natoms = 0
nresidues = 0
molecular_weight = 0
with open(logfile) as f:
for line in f:
if line.startswith(" Number of amino-acids residues"):
nresidues = int(line.strip().split()[5])
# Total number of protein atoms (including hydrogens)
if line.startswith(" Total number of atoms (including hydrogens)"):
natoms = int(float(line.strip().split()[6]))
if line.startswith(" Molecular Weight of protein:"):
molecular_weight = float(line.strip().split()[4])
return natoms, nresidues, molecular_weight
def _run_rwcontents(pdbin, logfile):
logfile = os.path.abspath(logfile)
cmd = ['rwcontents', 'xyzin', pdbin]
stdin = '' # blank to trigger EOF
retcode = ample_util.run_command(cmd=cmd, directory=os.getcwd(), logfile=logfile, stdin=stdin)
if retcode != 0:
raise RuntimeError("Error running cmd {0}\nSee logfile: {1}".format(cmd, logfile))
return
def _parse_modres(modres_text):
"""
COLUMNS DATA TYPE FIELD DEFINITION
--------------------------------------------------------------------------------
1 - 6 Record name "MODRES"
8 - 11 IDcode idCode ID code of this entry.
13 - 15 Residue name resName Residue name used in this entry.
17 Character chainID Chain identifier.
19 - 22 Integer seqNum Sequence number.
23 AChar iCode Insertion code.
25 - 27 Residue name stdRes Standard residue name.
30 - 70 String comment Description of the residue modification.
"""
modres = []
for line in modres_text:
assert line[0:6] == "MODRES", "Line did not begin with an MODRES record!: {0}".format(line)
idCode = line[7:11]
resName = line[12:15].strip()
        # Use None to represent an empty field
        chainID = None
        if line[16].strip():
            chainID = line[16]
seqNum = int(line[18:22])
iCode = ""
if line[22].strip():
iCode = line[22]
stdRes = line[24:27].strip()
comment = ""
if line[29:70].strip():
comment = line[29:70].strip()
modres.append([idCode, resName, chainID, seqNum, iCode, stdRes, comment])
return modres
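# Illustrative sketch only: a fabricated MODRES record laid out according to the column
# table in the docstring above. Expected result:
# [['1ABC', 'MSE', 'A', 101, '', 'MET', 'SELENOMETHIONINE']]
def _example_parse_modres():
    modres_line = "MODRES 1ABC MSE A  101  MET  SELENOMETHIONINE"
    return _parse_modres([modres_line])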
def reliable_sidechains(inpath=None, outpath=None):
"""Only output non-backbone atoms for residues in the res_names list.
"""
# Remove sidechains that are in res_names where the atom name is not in atom_names
res_names = ['MET', 'ASP', 'PRO', 'GLN', 'LYS', 'ARG', 'GLU', 'SER']
atom_names = ['N', 'CA', 'C', 'O', 'CB']
pdb_in = open(inpath, "r")
pdb_out = open(outpath, "w")
    pdb_pattern = re.compile(r'^ATOM\s*(\d*)\s*(\w*)\s*(\w*)\s*(\w)\s*(\d*)\s')
    for pdbline in pdb_in:
        pdb_result = pdb_pattern.match(pdbline)
# Check ATOM line and for residues in res_name, skip any that are not in atom names
if pdb_result:
pdb_result2 = re.split(pdb_pattern, pdbline)
            if pdb_result2[3] in res_names and pdb_result2[2] not in atom_names:
continue
# Write out everything else
pdb_out.write(pdbline)
# End for
pdb_out.close()
pdb_in.close()
return
def reliable_sidechains_cctbx(pdbin=None, pdbout=None):
"""Only output non-backbone atoms for residues in the res_names list.
"""
# Remove sidechains that are in res_names where the atom name is not in atom_names
res_names = ['MET', 'ASP', 'PRO', 'GLN', 'LYS', 'ARG', 'GLU', 'SER']
atom_names = ['N', 'CA', 'C', 'O', 'CB']
pdb_input = iotbx.pdb.pdb_input(pdbin)
hierachy = pdb_input.construct_hierarchy()
# Remove HETATMS
for model in hierachy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
assert not residue_group.have_conformers(), "Fix for conformers"
if residue_group.unique_resnames()[0] not in res_names:
# removing whilst looping through?!? - maybe...
chain.remove_residue_group(residue_group)
continue
for atom_group in residue_group.atom_groups():
# Can't use below as it uses indexes which change as we remove atoms
# ag.atoms().extract_hetero()]
todel = [a for a in atom_group.atoms() if a.name.strip() in atom_names]
for a in todel:
atom_group.remove_atom(a)
# Need to get crystal info and include
hierachy.write_pdb_file(pdbout, anisou=False)
return
def rename_chains(inpdb=None, outpdb=None, fromChain=None, toChain=None):
"""Rename Chains
"""
    allChain = None
    if fromChain is None and isinstance(toChain, str):
        allChain = toChain
else:
if len(fromChain) != len(toChain):
raise RuntimeError(
"rename_chains either needs a single to_chain or two list of equal length.\n"
"Got fromChain \'{}\' toChain: \'{}\'".format(fromChain, toChain)
)
logfile = outpdb + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(inpdb, outpdb).split()
if allChain:
stdin = "renchain /*/* {}".format(allChain)
else:
stdin = ""
for i in range(len(fromChain)):
stdin += "renchain {0} {1}\n".format(fromChain[i], toChain[i])
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode == 0:
os.unlink(logfile)
else:
raise RuntimeError("Error renaming chains {0}".format(fromChain))
def resseq(pdbin):
return _resseq(iotbx.pdb.pdb_input(pdbin).construct_hierarchy())
def _resseq(hierarchy):
"""Extract the sequence of residues from a pdb file."""
chain2data = sequence_util._sequence_data(hierarchy)
return dict((k, chain2data[k][1]) for k in chain2data.keys())
def renumber_residues(pdbin, pdbout, start=1):
""" Renumber the residues in the chain """
pdb_input = iotbx.pdb.pdb_input(file_name=pdbin)
hierarchy = pdb_input.construct_hierarchy()
_renumber(hierarchy, start)
with open(pdbout, 'w') as f:
f.write("REMARK Original file:\n")
f.write("REMARK {0}\n".format(pdbin))
f.write(hierarchy.as_pdb_string(anisou=False))
return
def _renumber(hierarchy, start=None):
for model in hierarchy.models():
for chain in model.chains():
for idx, residue_group in enumerate(chain.residue_groups()):
if start is None:
start = int(residue_group.resseq)
continue
residue_group.resseq = idx + start
return
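# Illustrative sketch only: placeholder paths. Renumbers all residue groups of
# "model.pdb" starting from 1 and writes the result to "model_renum.pdb".
def _example_renumber_residues():
    renumber_residues("model.pdb", "model_renum.pdb", start=1)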
def renumber_residues_gaps(pdbin, pdbout, gaps, start=1):
"""
Renumber the residues in the chain based on specified gaps
Parameters
----------
pdbin : str
pdbout : str
gaps : list
List containing True/False for gaps
"""
pdb_input = iotbx.pdb.pdb_input(file_name=pdbin)
hierarchy = pdb_input.construct_hierarchy()
for model in hierarchy.models():
for chain in model.chains():
resseq = 0
for idx, is_gap in enumerate(gaps):
if is_gap:
continue
try:
residue_group = chain.residue_groups()[resseq]
                except IndexError:
pass
else:
residue_group.resseq = idx + start
finally:
resseq += 1
with open(pdbout, 'w') as f:
f.write("REMARK Original file:\n")
f.write("REMARK {0}\n".format(pdbin))
f.write(hierarchy.as_pdb_string(anisou=False))
return
def select_residues(pdbin, pdbout, delete=None, tokeep=None, delete_idx=None, tokeep_idx=None):
pdbf = iotbx.file_reader.any_file(pdbin, force_type="pdb")
pdbf.check_file_type("pdb")
hierarchy = pdbf.file_object.construct_hierarchy()
crystal_symmetry = pdbf.file_object.crystal_symmetry()
if len(hierarchy.models()) > 1 or len(hierarchy.models()[0].chains()) > 1:
logger.debug("pdb %s has > 1 model or chain - only first model/chain will be kept", pdbin)
hierarchy = _select_residues(hierarchy, delete=delete, tokeep=tokeep, delete_idx=delete_idx, tokeep_idx=tokeep_idx)
# hierarchy.write_pdb_file(pdbout,anisou=False)
with open(pdbout, 'w') as f:
f.write("REMARK Original file:\n")
f.write("REMARK {0}\n".format(pdbin))
if crystal_symmetry is not None:
f.write(
iotbx.pdb.format_cryst1_and_scale_records(crystal_symmetry=crystal_symmetry, write_scale_records=True)
+ "\n"
)
f.write(hierarchy.as_pdb_string(anisou=False))
return
def _select_residues(hierarchy, delete=None, tokeep=None, delete_idx=None, tokeep_idx=None):
if len(hierarchy.models()) > 1:
for i, m in enumerate(hierarchy.models()):
if i != 0:
hierarchy.remove_model(m)
model = hierarchy.models()[0]
if len(model.chains()) > 1:
for i, c in enumerate(model.chains()):
if i != 0:
model.remove_chain(c)
chain = model.chains()[0]
idx = -1
for residue_group in chain.residue_groups():
# We ignore hetatms when indexing as we are concerned with residue indexes
if (delete_idx or tokeep_idx) and any([atom.hetero for atom in residue_group.atoms()]):
continue
idx += 1
remove = False
if delete and residue_group.resseq_as_int() in delete:
remove = True
        elif delete_idx and idx in delete_idx:
remove = True
elif tokeep and residue_group.resseq_as_int() not in tokeep:
remove = True
elif tokeep_idx and idx not in tokeep_idx:
remove = True
if remove:
chain.remove_residue_group(residue_group)
return hierarchy
def split_pdb(pdbin, directory=None, strip_hetatm=False, same_size=False):
"""Split a pdb file into its separate models
Parameters
----------
pdbin : str
        path to input pdb file
directory : str
path to directory where pdb files will be created
strip_hetatm : bool
remove HETATMS if true
same_size : bool
Only output models of equal length (the most numerous length is selected)
"""
if directory is None:
directory = os.path.dirname(pdbin)
if not os.path.isdir(directory):
os.mkdir(directory)
# Largely stolen from pdb_split_models.py in phenix
# http://cci.lbl.gov/cctbx_sources/iotbx/command_line/pdb_split_models.py
pdbf = iotbx.file_reader.any_file(pdbin, force_type="pdb")
pdbf.check_file_type("pdb")
hierarchy = pdbf.file_object.construct_hierarchy()
# Nothing to do
n_models = hierarchy.models_size()
if same_size:
_only_equal_sizes(hierarchy)
crystal_symmetry = pdbf.file_object.crystal_symmetry()
output_files = []
for k, model in enumerate(hierarchy.models()):
k += 1
new_hierarchy = iotbx.pdb.hierarchy.root()
new_hierarchy.append_model(model.detached_copy())
if strip_hetatm:
_strip(new_hierarchy, hetatm=True)
if model.id == "":
model_id = str(k)
else:
model_id = model.id.strip()
output_file = ample_util.filename_append(pdbin, model_id, directory)
with open(output_file, "w") as f:
if crystal_symmetry is not None:
f.write(
iotbx.pdb.format_cryst1_and_scale_records(
crystal_symmetry=crystal_symmetry, write_scale_records=True
)
+ '\n'
)
f.write("REMARK Model %d of %d\n" % (k, n_models))
if pdbin is not None:
f.write('REMARK Original file:\n')
f.write('REMARK %s\n' % pdbin)
f.write(new_hierarchy.as_pdb_string())
output_files.append(output_file)
return output_files
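# Illustrative sketch only: placeholder paths. Splits a multi-model "ensemble.pdb"
# into one file per model, dropping HETATMs and keeping only models of equal length.
def _example_split_pdb():
    return split_pdb("ensemble.pdb", directory="split_models", strip_hetatm=True, same_size=True)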
def split_into_chains(pdbin, chain=None, directory=None):
"""Split a pdb file into its separate chains"""
if directory is None:
directory = os.path.dirname(pdbin)
# Largely stolen from pdb_split_models.py in phenix
# http://cci.lbl.gov/cctbx_sources/iotbx/command_line/pdb_split_models.py
pdbf = iotbx.file_reader.any_file(pdbin, force_type="pdb")
pdbf.check_file_type("pdb")
hierarchy = pdbf.file_object.construct_hierarchy()
# Nothing to do
n_models = hierarchy.models_size()
if n_models != 1:
raise RuntimeError("split_into_chains only works with single-mdoel pdbs!")
crystal_symmetry = pdbf.file_object.crystal_symmetry()
output_files = []
n_chains = len(hierarchy.models()[0].chains())
for i, hchain in enumerate(hierarchy.models()[0].chains()):
if not hchain.is_protein():
continue
if chain and not hchain.id == chain:
continue
new_hierarchy = iotbx.pdb.hierarchy.root()
new_model = iotbx.pdb.hierarchy.model()
        new_hierarchy.append_model(new_model)
new_model.append_chain(hchain.detached_copy())
output_file = ample_util.filename_append(pdbin, hchain.id, directory)
with open(output_file, "w") as f:
if crystal_symmetry is not None:
f.write(
iotbx.pdb.format_cryst1_and_scale_records(
crystal_symmetry=crystal_symmetry, write_scale_records=True
)
+ '\n'
)
f.write('REMARK Chain %d of %d\n' % (i, n_chains))
if pdbin is not None:
f.write('REMARK Original file:\n')
f.write('REMARK %s\n' % pdbin)
f.write(new_hierarchy.as_pdb_string())
output_files.append(output_file)
if not len(output_files):
raise RuntimeError("split_into_chains could not find any chains to split")
return output_files
def standardise(pdbin, pdbout, chain=None, del_hetatm=False):
"""Rename any non-standard AA, remove solvent and only keep most probably conformation.
"""
tmp1 = ample_util.tmp_file_name() + ".pdb" # pdbcur insists names have a .pdb suffix
# Now clean up with pdbcur
logfile = tmp1 + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(pdbin, tmp1).split()
stdin = """delsolvent
noanisou
mostprob
"""
# We are extracting one of the chains
if chain:
stdin += "lvchain {0}\n".format(chain)
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode == 0:
os.unlink(logfile)
else:
raise RuntimeError("Error standardising pdb!")
# Standardise AA names and then remove any remaining HETATMs
std_residues_cctbx(tmp1, pdbout, del_hetatm=del_hetatm)
os.unlink(tmp1)
return retcode
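# Illustrative sketch only: placeholder paths; requires pdbcur (CCP4) on the PATH.
# Cleans chain A of "raw.pdb" and removes any remaining HETATMs.
def _example_standardise():
    return standardise("raw.pdb", "standardised.pdb", chain="A", del_hetatm=True)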
def std_residues_cctbx(pdbin, pdbout, del_hetatm=False):
"""Map all residues in MODRES section to their standard counterparts
optionally delete all other HETATMS"""
pdb_input = iotbx.pdb.pdb_input(pdbin)
crystal_symmetry = pdb_input.crystal_symmetry()
# Get MODRES Section & build up dict mapping the changes
modres_text = [l.strip() for l in pdb_input.primary_structure_section() if l.startswith("MODRES")]
modres = {}
for id, resname, chain, resseq, icode, stdres, comment in _parse_modres(modres_text):
        if chain not in modres:
modres[chain] = {}
modres[chain][int(resseq)] = (resname, stdres)
hierachy = pdb_input.construct_hierarchy()
for model in hierachy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
resseq = residue_group.resseq_as_int()
for atom_group in residue_group.atom_groups():
resname = atom_group.resname
if chain.id in modres and resseq in modres[chain.id] and modres[chain.id][resseq][0] == resname:
# Change modified name to std name
# assert modres[chain.id][resseq][0]==resname,\
# "Unmatched names: {0} : {1}".format(modres[chain.id][resseq][0],resname)
atom_group.resname = modres[chain.id][resseq][1]
# If any of the atoms are hetatms, set them to be atoms
for atom in atom_group.atoms():
if atom.hetero:
atom.hetero = False
if del_hetatm:
_strip(hierachy, hetatm=True)
with open(pdbout, 'w') as f:
f.write("REMARK Original file:\n")
f.write("REMARK {0}\n".format(pdbin))
if crystal_symmetry is not None:
f.write(
iotbx.pdb.format_cryst1_and_scale_records(crystal_symmetry=crystal_symmetry, write_scale_records=True)
+ "\n"
)
f.write(hierachy.as_pdb_string(anisou=False))
return
def strip(pdbin, pdbout, hetatm=False, hydrogen=False, atom_types=[]):
assert hetatm or hydrogen or atom_types, "Need to set what to strip!"
pdb_input = iotbx.pdb.pdb_input(pdbin)
crystal_symmetry = pdb_input.crystal_symmetry()
hierachy = pdb_input.construct_hierarchy()
_strip(hierachy, hetatm=hetatm, hydrogen=hydrogen, atom_types=atom_types)
with open(pdbout, 'w') as f:
f.write("REMARK Original file:\n")
f.write("REMARK {0}\n".format(pdbin))
if crystal_symmetry is not None:
f.write(
iotbx.pdb.format_cryst1_and_scale_records(crystal_symmetry=crystal_symmetry, write_scale_records=True)
+ "\n"
)
f.write(hierachy.as_pdb_string(anisou=False))
return
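# Illustrative sketch only: "model.pdb" is a placeholder path. Removes hydrogens and
# any OXT atoms while leaving everything else in place.
def _example_strip():
    strip("model.pdb", "model_stripped.pdb", hydrogen=True, atom_types=['OXT'])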
def _strip(hierachy, hetatm=False, hydrogen=False, atom_types=[]):
"""Remove all hetatoms from pdbfile"""
def remove_atom(atom, hetatm=False, hydrogen=False, atom_types=[]):
return (hetatm and atom.hetero) or (hydrogen and atom.element_is_hydrogen()) or atom.name.strip() in atom_types
for model in hierachy.models():
for chain in model.chains():
for residue_group in chain.residue_groups():
for atom_group in residue_group.atom_groups():
to_del = [
a
for a in atom_group.atoms()
if remove_atom(a, hetatm=hetatm, hydrogen=hydrogen, atom_types=atom_types)
]
for atom in to_del:
atom_group.remove_atom(atom)
return
def translate(inpdb=None, outpdb=None, ftranslate=None):
"""translate pdb
args:
ftranslate -- vector of fractional coordinates to shift by
"""
logfile = outpdb + ".log"
cmd = "pdbcur xyzin {0} xyzout {1}".format(inpdb, outpdb).split()
# Build up stdin
stdin = 'translate * frac {0:F} {1:F} {2:F}'.format(ftranslate[0], ftranslate[1], ftranslate[2])
retcode = ample_util.run_command(cmd=cmd, logfile=logfile, directory=os.getcwd(), dolog=False, stdin=stdin)
if retcode == 0:
# remove temporary files
os.unlink(logfile)
else:
raise RuntimeError("Error translating PDB")
def xyz_coordinates(pdbin):
"""Extract xyz for all atoms """
pdb_input = iotbx.pdb.pdb_input(file_name=pdbin)
hierarchy = pdb_input.construct_hierarchy()
return _xyz_coordinates(hierarchy)
def _xyz_coordinates(hierarchy):
res_lst, tmp = [], []
for residue_group in hierarchy.models()[0].chains()[0].residue_groups():
for atom_group in residue_group.atom_groups():
for atom in atom_group.atoms():
tmp.append(atom.xyz)
res_lst.append([residue_group.resseq_as_int(), tmp])
tmp = []
return res_lst
def xyz_cb_coordinates(pdbin):
"""Extract xyz for CA/CB atoms """
pdb_input = iotbx.pdb.pdb_input(file_name=pdbin)
hierarchy = pdb_input.construct_hierarchy()
res_dict = _xyz_cb_coordinates(hierarchy)
cb_lst = []
for i in xrange(len(res_dict)):
if len(res_dict[i]) > 1:
cb_lst.append(res_dict[i][1])
elif len(res_dict[i]) == 1:
cb_lst.append(res_dict[i][0])
return cb_lst
def _xyz_cb_coordinates(hierarchy):
res_lst = []
for residue_group in hierarchy.models()[0].chains()[0].residue_groups():
for atom_group in residue_group.atom_groups():
xyz_lst = _xyz_atom_coords(atom_group)
res_lst.append([residue_group.resseq_as_int(), xyz_lst])
return res_lst
def _xyz_atom_coords(atom_group):
"""Use this method if you need to identify if CB is present in atom_group and if not return CA"""
tmp_dict = {}
for atom in atom_group.atoms():
if atom.name.strip() in {"CA", "CB"}:
tmp_dict[atom.name.strip()] = atom.xyz
if 'CB' in tmp_dict:
return tmp_dict['CB']
elif 'CA' in tmp_dict:
return tmp_dict['CA']
else:
return float('inf'), float('inf'), float('inf')
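# Illustrative sketch only: "model.pdb" is a placeholder path. Returns one CB (or CA
# fallback) coordinate per residue of the first chain of the first model.
def _example_xyz_cb_coordinates():
    return xyz_cb_coordinates("model.pdb")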
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description='Manipulate PDB files', prefix_chars="-")
group = parser.add_mutually_exclusive_group()
group.add_argument('-ren', action='store_true', help="Renumber the PDB")
group.add_argument('-std', action='store_true', help='Standardise the PDB')
group.add_argument('-seq', action='store_true', help='Write a fasta of the found AA to stdout')
group.add_argument('-split_models', action='store_true', help='Split a pdb into constituent models')
group.add_argument('-split_chains', action='store_true', help='Split a pdb into constituent chains')
parser.add_argument('input_file', help='The input file - will not be altered')
parser.add_argument('-o', dest='output_file', help='The output file - will be created')
parser.add_argument('-chain', help='The chain to use')
parser.add_argument('-test', action='store_true', help='Run unittests')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
if args.test:
logging.debug(unittest.TestLoader().loadTestsFromModule(sys.modules[__name__]))
sys.exit(unittest.TextTestRunner().run(unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])))
# Get full paths to all files
args.input_file = os.path.abspath(args.input_file)
if not os.path.isfile(args.input_file):
raise RuntimeError("Cannot find input file: {}".format(args.input_file))
if args.output_file:
args.output_file = os.path.abspath(args.output_file)
else:
n = os.path.splitext(os.path.basename(args.input_file))[0]
args.output_file = n + "_std.pdb"
if args.ren:
renumber_residues(args.input_file, args.output_file, start=1)
elif args.std:
standardise(args.input_file, args.output_file, del_hetatm=True, chain=args.chain)
elif args.seq:
logging.debug(sequence_util.Sequence(pdb=args.input_file).fasta_str())
elif args.split_models:
logging.debug(split_pdb(args.input_file))
elif args.split_chains:
logging.debug(split_into_chains(args.input_file, chain=args.chain))
elif args.chain:
logging.debug(extract_chain(args.input_file, args.output_file, chainID=args.chain))
|
linucks/ample
|
ample/util/pdb_edit.py
|
Python
|
bsd-3-clause
| 46,394
|
[
"CRYSTAL"
] |
5ce6987f0d7852caf85611559c5fb827c0c2602489d5b17248ecd845e338ac11
|