Dataset schema (column name: type, observed size range):
- repository_name: string (316 distinct values)
- func_path_in_repository: string, length 6-223
- func_name: string, length 1-134
- language: string (1 distinct value)
- func_code_string: string, length 57-65.5k
- func_documentation_string: string, length 1-46.3k
- split_name: string (1 distinct value)
- func_code_url: string, length 91-315
- called_functions: list, length 1-156 (nullable)
- enclosing_scope: string, length 2-1.48M
dshean/demcoreg | demcoreg/dem_mask.py | get_bareground_mask | python |
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
print("Loading bareground")
b = bareground_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
if bareground_thresh < 0.0 or bareground_thresh > 100.0:
sys.exit("Invalid bare ground percentage")
mask = (l>bareground_thresh)
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, bareground_ds)
l = None
return mask
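#Hedged usage sketch for the function above; the VRT path mirrors get_bareground_fn()
#in the enclosing scope and is an assumption about the local datadir layout
from osgeo import gdal
bareground_ds = gdal.Open('bare2010/bare2010.vrt')
mask = get_bareground_mask(bareground_ds, bareground_thresh=60)
#mask is True where the bare ground percentage exceeds the 60% threshold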
|
Generate raster mask for exposed bare ground from global bareground data
| train | https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L143-L158 | null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control the location of these data files with the DATADIR environment variable:
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
"""Calls external shell script `get_nlcd.sh` to fetch:
2011 Land Use Land Cover (nlcd) grids, 30 m
http://www.mrlc.gov/nlcd11_leg.php
"""
#This is original filename, which requires ~17 GB
#nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
#get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
if not os.path.exists(nlcd_fn):
cmd = ['get_nlcd.sh',]
#subprocess.call(cmd)
sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
return nlcd_fn
def get_bareground_fn():
"""Calls external shell script `get_bareground.sh` to fetch:
~2010 global bare ground, 30 m
Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)
The shell script will compress all downloaded tiles using lossless LZW compression.
http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
"""
bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
if not os.path.exists(bg_fn):
cmd = ['get_bareground.sh',]
sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return bg_fn
#Download latest global RGI glacier db
def get_glacier_poly():
"""Calls external shell script `get_rgi.sh` to fetch:
Randolph Glacier Inventory (RGI) glacier outline shapefiles
Full RGI database: rgi50.zip is 410 MB
The shell script will unzip and merge regional shp into single global shp
http://www.glims.org/RGI/
"""
#rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
#Update to rgi60, should have this returned from get_rgi.sh
rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
if not os.path.exists(rgi_fn):
cmd = ['get_rgi.sh',]
sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return rgi_fn
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
"""Generate glacier polygon raster mask for input Dataset res/extent
"""
print("Masking glaciers")
if glac_shp_fn is None:
glac_shp_fn = get_glacier_poly()
if not os.path.exists(glac_shp_fn):
print("Unable to locate glacier shp: %s" % glac_shp_fn)
else:
print("Found glacier shp: %s" % glac_shp_fn)
#All of the proj, extent, handling should now occur in shp2array
icemask = geolib.shp2array(glac_shp_fn, ds)
return icemask
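#Hedged usage sketch for get_icemask; 'dem.tif' is a hypothetical input raster
#    dem_ds = gdal.Open('dem.tif')
#    icemask = get_icemask(dem_ds)
#get_mask below combines this with np.logical_and, treating True as a valid (keep) pixel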
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
"""Generate raster mask for specified NLCD LULC filter
"""
print("Loading NLCD LULC")
b = nlcd_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Filtering NLCD LULC with: %s" % filter)
#Original nlcd products have nan as ndv
#12 - ice
#31 - rock
#11 - open water, includes rivers
#52 - shrub, <5 m tall, >20%
    #42 - evergreen forest
#Should use data dictionary here for general masking
#Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
if filter == 'rock':
mask = (l==31)
elif filter == 'rock+ice':
mask = np.logical_or((l==31),(l==12))
elif filter == 'rock+ice+water':
mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))
elif filter == 'not_forest':
mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))
elif filter == 'not_forest+not_water':
mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))
else:
print("Invalid mask type")
mask = None
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, nlcd_ds)
l = None
return mask
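#A hedged sketch of the 'data dictionary' masking suggested in the comments above;
#class groupings are an assumption based on the NLCD legend codes listed there
nlcd_class_dict = {'water': [11], 'ice': [12], 'rock': [31], 'forest': [41, 42, 43]}
def get_nlcd_mask_dict(l, keep=('rock', 'ice')):
    #True for pixels whose NLCD class falls in any of the requested groups
    codes = [c for name in keep for c in nlcd_class_dict[name]]
    return np.isin(l, codes)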
def get_snodas_ds(dem_dt, code=1036):
"""Function to fetch and process SNODAS snow depth products for input datetime
http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html
Product codes:
1036 is snow depth
1034 is SWE
filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr
"""
import tarfile
import gzip
snodas_ds = None
snodas_url_str = None
outdir = os.path.join(datadir, 'snodas')
if not os.path.exists(outdir):
os.makedirs(outdir)
#Note: unmasked products (beyond CONUS) are only available from 2010-present
if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
elif dem_dt >= datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
else:
print("No SNODAS data available for input date")
if snodas_url_str is not None:
snodas_url = dem_dt.strftime(snodas_url_str)
snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
print("Unpacking")
tar = tarfile.open(snodas_tar_fn)
#gunzip to extract both dat and Hdr files, tar.gz
for ext in ('dat', 'Hdr'):
tar_subfn_str = tar_subfn_str_fmt % (code, ext)
tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
tar_subfn = os.path.splitext(tar_subfn_gz)[0]
print(tar_subfn)
if outdir is not None:
tar_subfn = os.path.join(outdir, tar_subfn)
if not os.path.exists(tar_subfn):
#Should be able to do this without writing intermediate gz to disk
tar.extract(tar_subfn_gz)
with gzip.open(tar_subfn_gz, 'rb') as f:
outf = open(tar_subfn, 'wb')
outf.write(f.read())
outf.close()
os.remove(tar_subfn_gz)
#Need to delete 'Created by module comment' line from Hdr, can contain too many characters
bad_str = 'Created by module comment'
snodas_fn = tar_subfn
f = open(snodas_fn)
output = []
for line in f:
            if bad_str not in line:
output.append(line)
f.close()
f = open(snodas_fn, 'w')
f.writelines(output)
f.close()
#Return GDAL dataset for extracted product
snodas_ds = gdal.Open(snodas_fn)
return snodas_ds
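#The comment above notes the intermediate .gz write could be avoided; a hedged,
#untested sketch using tarfile.extractfile and gzip.decompress:
#    member = tar.extractfile(tar_subfn_gz)
#    with open(tar_subfn, 'wb') as outf:
#        outf.write(gzip.decompress(member.read()))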
def get_modis_tile_list(ds):
"""Helper function to identify MODIS tiles that intersect input geometry
    modis_grid.py contains a dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
"""
from demcoreg import modis_grid
modis_dict = {}
for key in modis_grid.modis_dict:
modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
geom = geolib.ds_geom(ds)
geom_dup = geolib.geom_dup(geom)
ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
geom_dup.Transform(ct)
tile_list = []
for key, val in list(modis_dict.items()):
if geom_dup.Intersects(val):
tile_list.append(key)
return tile_list
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
"""Function to fetch and process MODSCAG fractional snow cover products for input datetime
Products are tiled in MODIS sinusoidal projection
example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif
"""
#Could also use global MODIS 500 m snowcover grids, 8 day
#http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
#These are HDF4, sinusoidal
#Should be able to load up with warplib without issue
import re
import requests
from bs4 import BeautifulSoup
auth = iolib.get_auth()
pad_days = timedelta(days=pad_days)
dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
outdir = os.path.join(datadir, 'modscag')
if not os.path.exists(outdir):
os.makedirs(outdir)
out_vrt_fn_list = []
for dt in dt_list:
out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
#If we already have a vrt and it contains all of the necessary tiles
if os.path.exists(out_vrt_fn):
vrt_ds = gdal.Open(out_vrt_fn)
if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
out_vrt_fn_list.append(out_vrt_fn)
continue
#Otherwise, download missing tiles and rebuild
#Try to use historic products
modscag_fn_list = []
#Note: not all tiles are available for same date ranges in historic vs. real-time
#Need to repeat search tile-by-tile
for tile in tile_list:
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
modscag_url_fn = []
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
#Couldn't find historic, try to use real-time products
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
print("Unable to fetch MODSCAG for %s" % dt)
else:
                #OK, we got a valid directory listing
#Now extract actual tif filenames to fetch from html
parsed_html = BeautifulSoup(r.content, "html.parser")
#Fetch all tiles
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if modscag_url_fn:
modscag_url_fn = modscag_url_fn[0]
modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
print(modscag_url)
modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
if not os.path.exists(modscag_fn):
iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
modscag_fn_list.append(modscag_fn)
#Mosaic tiles - currently a hack
if modscag_fn_list:
cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
cmd.extend(modscag_fn_list)
print(cmd)
subprocess.call(cmd, shell=False)
out_vrt_fn_list.append(out_vrt_fn)
return out_vrt_fn_list
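#The gdalbuildvrt subprocess call above is flagged as a hack; a hedged alternative
#using the GDAL Python bindings (assumes GDAL >= 2.1 for gdal.BuildVRT):
#    vrt_ds = gdal.BuildVRT(out_vrt_fn, modscag_fn_list, VRTNodata=255)
#    vrt_ds = None  #dereference to flush the VRT to disk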
def proc_modscag(fn_list, extent=None, t_srs=None):
"""Process the MODSCAG products for full date range, create composites and reproject
"""
    #Use cubic spline here for improved upsampling
ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
#Create stack here - no need for most of mastack machinery, just make 3D array
#Mask values greater than 100% (clouds, bad pixels, etc)
ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
stack_count.set_fill_value(0)
stack_min = ma_stack.min(axis=0).astype(np.uint8)
stack_min.set_fill_value(0)
stack_max = ma_stack.max(axis=0).astype(np.uint8)
stack_max.set_fill_value(0)
stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
stack_med.set_fill_value(0)
out_fn = stack_fn + '_count.tif'
iolib.writeGTiff(stack_count, out_fn, ds_list[0])
out_fn = stack_fn + '_max.tif'
iolib.writeGTiff(stack_max, out_fn, ds_list[0])
out_fn = stack_fn + '_min.tif'
iolib.writeGTiff(stack_min, out_fn, ds_list[0])
out_fn = stack_fn + '_med.tif'
iolib.writeGTiff(stack_med, out_fn, ds_list[0])
ds = gdal.Open(out_fn)
return ds
def get_toa_fn(dem_fn):
toa_fn = None
#Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
#dem_dir = os.path.split(os.path.split(os.path.abspath(dem_fn))[0])[0]
dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
import re
#Get index of the top level pair directory containing toa (WV02_20140514_1030010031114100_1030010030896000)
r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
if r_idx:
r_idx = r_idx[0]
#Reconstruct dir
dem_dir = (os.sep).join(dem_dir_list[0:r_idx+1])
#Find toa.tif in top-level dir
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if not toa_fn:
ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
if ortho_fn:
cmd = ['toa.sh', dem_dir]
print(cmd)
subprocess.call(cmd)
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if toa_fn:
toa_fn = toa_fn[0]
else:
toa_fn = None
if toa_fn is None:
sys.exit("Unable to locate TOA dataset")
return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
toa = iolib.ds_getma(toa_ds)
toa_mask = np.ma.masked_greater(toa, toa_thresh)
#This should be 1 for valid surfaces, 0 for snowcovered surfaces
toa_mask = ~(np.ma.getmaskarray(toa_mask))
return toa_mask
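#Hedged usage sketch: apply the TOA mask to a DEM masked array (names are assumptions);
#toa_mask is True for valid surfaces, so invert it for the numpy ma convention
#    toa_mask = get_toa_mask(toa_ds, toa_thresh=0.4)
#    dem_masked = np.ma.array(dem, mask=~toa_mask)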
def check_mask_list(mask_list):
temp = []
for m in mask_list:
if m not in mask_choices:
print("Invalid mask choice: %s" % m)
else:
temp.append(m)
return temp
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
mask_list = check_mask_list(mask_list)
if 'none' in mask_list:
newmask = False
else:
#Basename for output files
if outdir is not None:
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.path.split(dem_fn)[0]
if dem_fn is not None:
#Extract DEM timestamp
dem_dt = timelib.fn_getdatetime(dem_fn)
out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
if args is None:
#Get default values
parser = getparser()
args = parser.parse_args(['',])
newmask = True
if 'glaciers' in mask_list:
icemask = get_icemask(dem_ds)
if writeout:
out_fn = out_fn_base+'_ice_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(icemask, newmask)
        #Need to process NLCD separately, with nearest neighbor interpolation
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
rs = 'near'
nlcd_ds = gdal.Open(get_nlcd_fn())
nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_nlcd.tif'
nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(nlcdmask, newmask)
if 'bareground' in mask_list and args.bareground_thresh > 0:
bareground_ds = gdal.Open(get_bareground_fn())
bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_bareground.tif'
baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(baregroundmask, newmask)
if 'snodas' in mask_list and args.snodas_thresh > 0:
#Get SNODAS snow depth products for DEM timestamp
snodas_min_dt = datetime(2003,9,30)
if dem_dt >= snodas_min_dt:
snodas_ds = get_snodas_ds(dem_dt)
if snodas_ds is not None:
snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
#snow depth values are mm, convert to meters
snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
if snodas_depth.count() > 0:
print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
out_fn = None
if writeout:
out_fn = out_fn_base+'_snodas_depth.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(snodas_mask, newmask)
else:
print("SNODAS grid for input location and timestamp is empty")
#These tiles cover CONUS
#tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
if 'modscag' in mask_list and args.modscag_thresh > 0:
modscag_min_dt = datetime(2000,2,24)
if dem_dt < modscag_min_dt:
print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
% (dem_dt, modscag_min_dt))
else:
tile_list = get_modis_tile_list(dem_ds)
print(tile_list)
pad_days=7
modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
if modscag_fn_list:
modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
modscag_fsca = iolib.ds_getma(modscag_ds_warp)
out_fn = None
if writeout:
out_fn = out_fn_base+'_modscag_fsca.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
modscag_mask = ~(modscag_mask)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(modscag_mask, newmask)
#Use reflectance values to estimate snowcover
if 'toa' in mask_list:
#Use top of atmosphere scaled reflectance values (0-1)
toa_ds = gdal.Open(get_toa_fn(dem_fn))
toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
if writeout:
out_fn = out_fn_base+'_toa_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(toa_mask, newmask)
        if False:
            #Filter based on expected snowline (disabled; note 'dem' is not defined in this scope)
#Simplest approach uses altitude cutoff
max_elev = 1500
newdem = np.ma.masked_greater(dem, max_elev)
newmask = np.ma.getmaskarray(newdem)
print("Generating final mask to use for reference surfaces, and applying to input DEM")
#Now invert to use to create final masked array
        #True (1) represents "invalid" pixel to match numpy ma convention
newmask = ~newmask
#Dilate the mask
if args.dilate is not None:
niter = args.dilate
print("Dilating mask with %i iterations" % niter)
from scipy import ndimage
            newmask = ~(ndimage.binary_dilation(~newmask, iterations=niter))
return newmask
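#Hedged end-to-end sketch of get_mask (filename and mask list are assumptions);
#the returned mask is True for invalid pixels, matching the np.ma convention
#    dem_ds = gdal.Open('dem.tif')
#    newmask = get_mask(dem_ds, ['glaciers', 'nlcd'], dem_fn='dem.tif')
#    newdem = np.ma.array(iolib.ds_getma(dem_ds), mask=newmask)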
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
parser = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
parser.add_argument('dem_fn', type=str, help='DEM filename')
parser.add_argument('--outdir', default=None, help='Directory for output products')
parser.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
#parser.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
parser.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
parser.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
parser.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
parser.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
parser.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
parser.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
parser.add_argument('--bareground', action='store_true', help="Enable bareground filter")
parser.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
parser.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
parser.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
parser.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
parser.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
return parser
def main():
parser = getparser()
args = parser.parse_args()
mask_list = []
if args.toa: mask_list.append('toa')
if args.snodas: mask_list.append('snodas')
if args.modscag: mask_list.append('modscag')
if args.bareground: mask_list.append('bareground')
if args.glaciers: mask_list.append('glaciers')
if args.nlcd: mask_list.append('nlcd')
if not mask_list:
parser.print_help()
sys.exit("Must specify at least one mask type")
#This directory should or will contain the relevant data products
#if args.datadir is None:
# datadir = iolib.get_datadir()
dem_fn = args.dem_fn
dem_ds = gdal.Open(dem_fn)
print(dem_fn)
#Get DEM masked array
dem = iolib.ds_getma(dem_ds)
print("%i valid pixels in original input tif" % dem.count())
#Set up cascading mask preparation
#True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
#Initialize the mask
#newmask = ~(np.ma.getmaskarray(dem))
newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
#Apply mask to original DEM - use these surfaces for co-registration
newdem = np.ma.array(dem, mask=newmask)
#Check that we have enough pixels, good distribution
min_validpx_count = 100
min_validpx_std = 10
validpx_count = newdem.count()
validpx_std = newdem.std()
print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
print("%0.2f std in masked output tif to be used as ref" % validpx_std)
#if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
if (validpx_count > min_validpx_count):
        out_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
        #Honor --outdir when specified; the default outdir of None would break os.path.join
        if args.outdir is not None:
            out_fn = os.path.join(args.outdir, os.path.split(out_fn)[-1])
print("Writing out %s" % out_fn)
iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
else:
print("Not enough valid pixels!")
if __name__ == "__main__":
main()
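#Hedged command-line usage sketch (flags per getparser above; the DEM filename is an assumption):
#    dem_mask.py --nlcd --glaciers --writeout --outdir out dem.tif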
|
dshean/demcoreg | demcoreg/dem_mask.py | get_snodas_ds | python |
def get_snodas_ds(dem_dt, code=1036):
import tarfile
import gzip
snodas_ds = None
snodas_url_str = None
outdir = os.path.join(datadir, 'snodas')
if not os.path.exists(outdir):
os.makedirs(outdir)
#Note: unmasked products (beyond CONUS) are only available from 2010-present
if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
elif dem_dt >= datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
else:
print("No SNODAS data available for input date")
if snodas_url_str is not None:
snodas_url = dem_dt.strftime(snodas_url_str)
snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
print("Unpacking")
tar = tarfile.open(snodas_tar_fn)
#gunzip to extract both dat and Hdr files, tar.gz
for ext in ('dat', 'Hdr'):
tar_subfn_str = tar_subfn_str_fmt % (code, ext)
tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
tar_subfn = os.path.splitext(tar_subfn_gz)[0]
print(tar_subfn)
if outdir is not None:
tar_subfn = os.path.join(outdir, tar_subfn)
if not os.path.exists(tar_subfn):
#Should be able to do this without writing intermediate gz to disk
tar.extract(tar_subfn_gz)
with gzip.open(tar_subfn_gz, 'rb') as f:
outf = open(tar_subfn, 'wb')
outf.write(f.read())
outf.close()
os.remove(tar_subfn_gz)
#Need to delete 'Created by module comment' line from Hdr, can contain too many characters
bad_str = 'Created by module comment'
snodas_fn = tar_subfn
f = open(snodas_fn)
output = []
for line in f:
            if bad_str not in line:
output.append(line)
f.close()
f = open(snodas_fn, 'w')
f.writelines(output)
f.close()
#Return GDAL dataset for extracted product
snodas_ds = gdal.Open(snodas_fn)
return snodas_ds
|
Function to fetch and process SNODAS snow depth products for input datetime
http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html
Product codes:
1036 is snow depth
1034 is SWE
filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr
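A hedged call sketch for this row, reusing the date from the documented filename example:
from datetime import datetime
#1036 = snow depth per the product codes above; the date matches the example filename
snodas_ds = get_snodas_ds(datetime(2015, 4, 22), code=1036)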
| train | https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L160-L228 | null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control the location of these data files with the DATADIR environment variable:
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
"""Calls external shell script `get_nlcd.sh` to fetch:
2011 Land Use Land Cover (nlcd) grids, 30 m
http://www.mrlc.gov/nlcd11_leg.php
"""
#This is original filename, which requires ~17 GB
#nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
#get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
if not os.path.exists(nlcd_fn):
cmd = ['get_nlcd.sh',]
#subprocess.call(cmd)
sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
return nlcd_fn
def get_bareground_fn():
"""Calls external shell script `get_bareground.sh` to fetch:
~2010 global bare ground, 30 m
Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)
The shell script will compress all downloaded tiles using lossless LZW compression.
http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
"""
bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
if not os.path.exists(bg_fn):
cmd = ['get_bareground.sh',]
sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return bg_fn
#Download latest global RGI glacier db
def get_glacier_poly():
"""Calls external shell script `get_rgi.sh` to fetch:
Randolph Glacier Inventory (RGI) glacier outline shapefiles
Full RGI database: rgi50.zip is 410 MB
The shell script will unzip and merge regional shp into single global shp
http://www.glims.org/RGI/
"""
#rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
#Update to rgi60, should have this returned from get_rgi.sh
rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
if not os.path.exists(rgi_fn):
cmd = ['get_rgi.sh',]
sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return rgi_fn
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
"""Generate glacier polygon raster mask for input Dataset res/extent
"""
print("Masking glaciers")
if glac_shp_fn is None:
glac_shp_fn = get_glacier_poly()
if not os.path.exists(glac_shp_fn):
print("Unable to locate glacier shp: %s" % glac_shp_fn)
else:
print("Found glacier shp: %s" % glac_shp_fn)
#All of the proj, extent, handling should now occur in shp2array
icemask = geolib.shp2array(glac_shp_fn, ds)
return icemask
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
"""Generate raster mask for specified NLCD LULC filter
"""
print("Loading NLCD LULC")
b = nlcd_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Filtering NLCD LULC with: %s" % filter)
#Original nlcd products have nan as ndv
#12 - ice
#31 - rock
#11 - open water, includes rivers
#52 - shrub, <5 m tall, >20%
    #42 - evergreen forest
#Should use data dictionary here for general masking
#Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
if filter == 'rock':
mask = (l==31)
elif filter == 'rock+ice':
mask = np.logical_or((l==31),(l==12))
elif filter == 'rock+ice+water':
mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))
elif filter == 'not_forest':
mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))
elif filter == 'not_forest+not_water':
mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))
else:
print("Invalid mask type")
mask = None
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, nlcd_ds)
l = None
return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
"""Generate raster mask for exposed bare ground from global bareground data
"""
print("Loading bareground")
b = bareground_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
if bareground_thresh < 0.0 or bareground_thresh > 100.0:
sys.exit("Invalid bare ground percentage")
mask = (l>bareground_thresh)
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, bareground_ds)
l = None
return mask
def get_modis_tile_list(ds):
"""Helper function to identify MODIS tiles that intersect input geometry
    modis_grid.py contains a dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
"""
from demcoreg import modis_grid
modis_dict = {}
for key in modis_grid.modis_dict:
modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
geom = geolib.ds_geom(ds)
geom_dup = geolib.geom_dup(geom)
ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
geom_dup.Transform(ct)
tile_list = []
for key, val in list(modis_dict.items()):
if geom_dup.Intersects(val):
tile_list.append(key)
return tile_list
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
"""Function to fetch and process MODSCAG fractional snow cover products for input datetime
Products are tiled in MODIS sinusoidal projection
example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif
"""
#Could also use global MODIS 500 m snowcover grids, 8 day
#http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
#These are HDF4, sinusoidal
#Should be able to load up with warplib without issue
import re
import requests
from bs4 import BeautifulSoup
auth = iolib.get_auth()
pad_days = timedelta(days=pad_days)
dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
outdir = os.path.join(datadir, 'modscag')
if not os.path.exists(outdir):
os.makedirs(outdir)
out_vrt_fn_list = []
for dt in dt_list:
out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
#If we already have a vrt and it contains all of the necessary tiles
if os.path.exists(out_vrt_fn):
vrt_ds = gdal.Open(out_vrt_fn)
if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
out_vrt_fn_list.append(out_vrt_fn)
continue
#Otherwise, download missing tiles and rebuild
#Try to use historic products
modscag_fn_list = []
#Note: not all tiles are available for same date ranges in historic vs. real-time
#Need to repeat search tile-by-tile
for tile in tile_list:
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
modscag_url_fn = []
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
#Couldn't find historic, try to use real-time products
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
print("Unable to fetch MODSCAG for %s" % dt)
else:
                #OK, we got a valid directory listing
#Now extract actual tif filenames to fetch from html
parsed_html = BeautifulSoup(r.content, "html.parser")
#Fetch all tiles
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if modscag_url_fn:
modscag_url_fn = modscag_url_fn[0]
modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
print(modscag_url)
modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
if not os.path.exists(modscag_fn):
iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
modscag_fn_list.append(modscag_fn)
#Mosaic tiles - currently a hack
if modscag_fn_list:
cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
cmd.extend(modscag_fn_list)
print(cmd)
subprocess.call(cmd, shell=False)
out_vrt_fn_list.append(out_vrt_fn)
return out_vrt_fn_list
def proc_modscag(fn_list, extent=None, t_srs=None):
"""Process the MODSCAG products for full date range, create composites and reproject
"""
    #Use cubic spline here for improved upsampling
ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
#Create stack here - no need for most of mastack machinery, just make 3D array
#Mask values greater than 100% (clouds, bad pixels, etc)
ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
stack_count.set_fill_value(0)
stack_min = ma_stack.min(axis=0).astype(np.uint8)
stack_min.set_fill_value(0)
stack_max = ma_stack.max(axis=0).astype(np.uint8)
stack_max.set_fill_value(0)
stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
stack_med.set_fill_value(0)
out_fn = stack_fn + '_count.tif'
iolib.writeGTiff(stack_count, out_fn, ds_list[0])
out_fn = stack_fn + '_max.tif'
iolib.writeGTiff(stack_max, out_fn, ds_list[0])
out_fn = stack_fn + '_min.tif'
iolib.writeGTiff(stack_min, out_fn, ds_list[0])
out_fn = stack_fn + '_med.tif'
iolib.writeGTiff(stack_med, out_fn, ds_list[0])
ds = gdal.Open(out_fn)
return ds
def get_toa_fn(dem_fn):
toa_fn = None
#Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
#dem_dir = os.path.split(os.path.split(os.path.abspath(dem_fn))[0])[0]
dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
import re
#Get index of the top level pair directory containing toa (WV02_20140514_1030010031114100_1030010030896000)
r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
if r_idx:
r_idx = r_idx[0]
#Reconstruct dir
dem_dir = (os.sep).join(dem_dir_list[0:r_idx+1])
#Find toa.tif in top-level dir
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if not toa_fn:
ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
if ortho_fn:
cmd = ['toa.sh', dem_dir]
print(cmd)
subprocess.call(cmd)
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if toa_fn:
toa_fn = toa_fn[0]
else:
toa_fn = None
if toa_fn is None:
sys.exit("Unable to locate TOA dataset")
return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
toa = iolib.ds_getma(toa_ds)
toa_mask = np.ma.masked_greater(toa, toa_thresh)
#This should be 1 for valid surfaces, 0 for snowcovered surfaces
toa_mask = ~(np.ma.getmaskarray(toa_mask))
return toa_mask
def check_mask_list(mask_list):
temp = []
for m in mask_list:
if m not in mask_choices:
print("Invalid mask choice: %s" % m)
else:
temp.append(m)
return temp
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
mask_list = check_mask_list(mask_list)
if 'none' in mask_list:
newmask = False
else:
#Basename for output files
if outdir is not None:
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.path.split(dem_fn)[0]
if dem_fn is not None:
#Extract DEM timestamp
dem_dt = timelib.fn_getdatetime(dem_fn)
out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
if args is None:
#Get default values
parser = getparser()
args = parser.parse_args(['',])
newmask = True
if 'glaciers' in mask_list:
icemask = get_icemask(dem_ds)
if writeout:
out_fn = out_fn_base+'_ice_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(icemask, newmask)
        #Need to process NLCD separately, with nearest neighbor interpolation
        if 'nlcd' in mask_list and args.nlcd_filter != 'none':
rs = 'near'
nlcd_ds = gdal.Open(get_nlcd_fn())
nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_nlcd.tif'
nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(nlcdmask, newmask)
if 'bareground' in mask_list and args.bareground_thresh > 0:
bareground_ds = gdal.Open(get_bareground_fn())
bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_bareground.tif'
baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(baregroundmask, newmask)
if 'snodas' in mask_list and args.snodas_thresh > 0:
#Get SNODAS snow depth products for DEM timestamp
snodas_min_dt = datetime(2003,9,30)
if dem_dt >= snodas_min_dt:
snodas_ds = get_snodas_ds(dem_dt)
if snodas_ds is not None:
snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
#snow depth values are mm, convert to meters
snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
if snodas_depth.count() > 0:
print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
out_fn = None
if writeout:
out_fn = out_fn_base+'_snodas_depth.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(snodas_mask, newmask)
else:
print("SNODAS grid for input location and timestamp is empty")
#These tiles cover CONUS
#tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
if 'modscag' in mask_list and args.modscag_thresh > 0:
modscag_min_dt = datetime(2000,2,24)
if dem_dt < modscag_min_dt:
print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
% (dem_dt, modscag_min_dt))
else:
tile_list = get_modis_tile_list(dem_ds)
print(tile_list)
pad_days=7
modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
if modscag_fn_list:
modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
modscag_fsca = iolib.ds_getma(modscag_ds_warp)
out_fn = None
if writeout:
out_fn = out_fn_base+'_modscag_fsca.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
modscag_mask = ~(modscag_mask)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(modscag_mask, newmask)
#Use reflectance values to estimate snowcover
if 'toa' in mask_list:
#Use top of atmosphere scaled reflectance values (0-1)
toa_ds = gdal.Open(get_toa_fn(dem_fn))
toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
if writeout:
out_fn = out_fn_base+'_toa_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(toa_mask, newmask)
        if False:
            #Filter based on expected snowline (disabled; note 'dem' is not defined in this scope)
#Simplest approach uses altitude cutoff
max_elev = 1500
newdem = np.ma.masked_greater(dem, max_elev)
newmask = np.ma.getmaskarray(newdem)
print("Generating final mask to use for reference surfaces, and applying to input DEM")
#Now invert to use to create final masked array
        #True (1) represents "invalid" pixel to match numpy ma convention
newmask = ~newmask
#Dilate the mask
if args.dilate is not None:
niter = args.dilate
print("Dilating mask with %i iterations" % niter)
from scipy import ndimage
            newmask = ~(ndimage.binary_dilation(~newmask, iterations=niter))
return newmask
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
parser = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
parser.add_argument('dem_fn', type=str, help='DEM filename')
parser.add_argument('--outdir', default=None, help='Directory for output products')
parser.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
#parser.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
parser.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
parser.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
parser.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
parser.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
parser.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
parser.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
parser.add_argument('--bareground', action='store_true', help="Enable bareground filter")
parser.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
parser.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
parser.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
parser.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
parser.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
return parser
def main():
parser = getparser()
args = parser.parse_args()
mask_list = []
if args.toa: mask_list.append('toa')
if args.snodas: mask_list.append('snodas')
if args.modscag: mask_list.append('modscag')
if args.bareground: mask_list.append('bareground')
if args.glaciers: mask_list.append('glaciers')
if args.nlcd: mask_list.append('nlcd')
if not mask_list:
parser.print_help()
sys.exit("Must specify at least one mask type")
#This directory should or will contain the relevant data products
#if args.datadir is None:
# datadir = iolib.get_datadir()
dem_fn = args.dem_fn
dem_ds = gdal.Open(dem_fn)
print(dem_fn)
#Get DEM masked array
dem = iolib.ds_getma(dem_ds)
print("%i valid pixels in original input tif" % dem.count())
#Set up cascading mask preparation
#True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
#Initialize the mask
#newmask = ~(np.ma.getmaskarray(dem))
newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
#Apply mask to original DEM - use these surfaces for co-registration
newdem = np.ma.array(dem, mask=newmask)
#Check that we have enough pixels, good distribution
min_validpx_count = 100
min_validpx_std = 10
validpx_count = newdem.count()
validpx_std = newdem.std()
print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
print("%0.2f std in masked output tif to be used as ref" % validpx_std)
#if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
if (validpx_count > min_validpx_count):
        out_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
        #Honor --outdir when specified; the default outdir of None would break os.path.join
        if args.outdir is not None:
            out_fn = os.path.join(args.outdir, os.path.split(out_fn)[-1])
print("Writing out %s" % out_fn)
iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
else:
print("Not enough valid pixels!")
if __name__ == "__main__":
main()
|
dshean/demcoreg | demcoreg/dem_mask.py | get_modis_tile_list | python |
def get_modis_tile_list(ds):
from demcoreg import modis_grid
modis_dict = {}
for key in modis_grid.modis_dict:
modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
geom = geolib.ds_geom(ds)
geom_dup = geolib.geom_dup(geom)
ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
geom_dup.Transform(ct)
tile_list = []
for key, val in list(modis_dict.items()):
if geom_dup.Intersects(val):
tile_list.append(key)
return tile_list
|
Helper function to identify MODIS tiles that intersect input geometry
modis_grid.py contains a dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
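A hedged usage sketch for this row ('dem.tif' is a hypothetical input raster):
from osgeo import gdal
dem_ds = gdal.Open('dem.tif')
tile_list = get_modis_tile_list(dem_ds)  #e.g. the CONUS tiles listed in get_modscag_fn_list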
| train | https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L230-L249 | null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control the location of these data files with the DATADIR environment variable:
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
"""Calls external shell script `get_nlcd.sh` to fetch:
2011 Land Use Land Cover (nlcd) grids, 30 m
http://www.mrlc.gov/nlcd11_leg.php
"""
#This is original filename, which requires ~17 GB
#nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
#get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
if not os.path.exists(nlcd_fn):
cmd = ['get_nlcd.sh',]
#subprocess.call(cmd)
sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
return nlcd_fn
def get_bareground_fn():
"""Calls external shell script `get_bareground.sh` to fetch:
~2010 global bare ground, 30 m
Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)
The shell script will compress all downloaded tiles using lossless LZW compression.
http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
"""
bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
if not os.path.exists(bg_fn):
cmd = ['get_bareground.sh',]
sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return bg_fn
#Download latest global RGI glacier db
def get_glacier_poly():
"""Calls external shell script `get_rgi.sh` to fetch:
Randolph Glacier Inventory (RGI) glacier outline shapefiles
Full RGI database: rgi50.zip is 410 MB
The shell script will unzip and merge regional shp into single global shp
http://www.glims.org/RGI/
"""
#rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
#Update to rgi60, should have this returned from get_rgi.sh
rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
if not os.path.exists(rgi_fn):
cmd = ['get_rgi.sh',]
sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return rgi_fn
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
"""Generate glacier polygon raster mask for input Dataset res/extent
"""
print("Masking glaciers")
if glac_shp_fn is None:
glac_shp_fn = get_glacier_poly()
if not os.path.exists(glac_shp_fn):
print("Unable to locate glacier shp: %s" % glac_shp_fn)
else:
print("Found glacier shp: %s" % glac_shp_fn)
#All of the proj, extent, handling should now occur in shp2array
icemask = geolib.shp2array(glac_shp_fn, ds)
return icemask
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
"""Generate raster mask for specified NLCD LULC filter
"""
print("Loading NLCD LULC")
b = nlcd_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Filtering NLCD LULC with: %s" % filter)
#Original nlcd products have nan as ndv
#12 - ice
#31 - rock
#11 - open water, includes rivers
#52 - shrub, <5 m tall, >20%
    #42 - evergreen forest
#Should use data dictionary here for general masking
#Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
if filter == 'rock':
mask = (l==31)
elif filter == 'rock+ice':
mask = np.logical_or((l==31),(l==12))
elif filter == 'rock+ice+water':
mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))
elif filter == 'not_forest':
mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))
elif filter == 'not_forest+not_water':
mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))
else:
print("Invalid mask type")
mask = None
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, nlcd_ds)
l = None
return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
"""Generate raster mask for exposed bare ground from global bareground data
"""
print("Loading bareground")
b = bareground_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
if bareground_thresh < 0.0 or bareground_thresh > 100.0:
sys.exit("Invalid bare ground percentage")
mask = (l>bareground_thresh)
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, bareground_ds)
l = None
return mask
def get_snodas_ds(dem_dt, code=1036):
"""Function to fetch and process SNODAS snow depth products for input datetime
http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html
Product codes:
1036 is snow depth
1034 is SWE
filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr
"""
import tarfile
import gzip
snodas_ds = None
snodas_url_str = None
outdir = os.path.join(datadir, 'snodas')
if not os.path.exists(outdir):
os.makedirs(outdir)
#Note: unmasked products (beyond CONUS) are only available from 2010-present
if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
elif dem_dt >= datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
else:
print("No SNODAS data available for input date")
if snodas_url_str is not None:
snodas_url = dem_dt.strftime(snodas_url_str)
snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
print("Unpacking")
tar = tarfile.open(snodas_tar_fn)
#gunzip to extract both dat and Hdr files, tar.gz
for ext in ('dat', 'Hdr'):
tar_subfn_str = tar_subfn_str_fmt % (code, ext)
tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
tar_subfn = os.path.splitext(tar_subfn_gz)[0]
print(tar_subfn)
if outdir is not None:
tar_subfn = os.path.join(outdir, tar_subfn)
if not os.path.exists(tar_subfn):
#Should be able to do this without writing intermediate gz to disk
tar.extract(tar_subfn_gz)
with gzip.open(tar_subfn_gz, 'rb') as f:
outf = open(tar_subfn, 'wb')
outf.write(f.read())
outf.close()
os.remove(tar_subfn_gz)
#Need to delete 'Created by module comment' line from Hdr, can contain too many characters
bad_str = 'Created by module comment'
snodas_fn = tar_subfn
f = open(snodas_fn)
output = []
for line in f:
            if bad_str not in line:
output.append(line)
f.close()
f = open(snodas_fn, 'w')
f.writelines(output)
f.close()
#Return GDAL dataset for extracted product
snodas_ds = gdal.Open(snodas_fn)
return snodas_ds
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
"""Function to fetch and process MODSCAG fractional snow cover products for input datetime
Products are tiled in MODIS sinusoidal projection
example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif
"""
#Could also use global MODIS 500 m snowcover grids, 8 day
#http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
#These are HDF4, sinusoidal
#Should be able to load up with warplib without issue
import re
import requests
from bs4 import BeautifulSoup
auth = iolib.get_auth()
pad_days = timedelta(days=pad_days)
dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
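#With the default pad_days=7 this is a 15-day window of daily timestamps centered
#on the DEM acquisition date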
outdir = os.path.join(datadir, 'modscag')
if not os.path.exists(outdir):
os.makedirs(outdir)
out_vrt_fn_list = []
for dt in dt_list:
out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
#If we already have a vrt and it contains all of the necessary tiles
if os.path.exists(out_vrt_fn):
vrt_ds = gdal.Open(out_vrt_fn)
if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
out_vrt_fn_list.append(out_vrt_fn)
continue
#Otherwise, download missing tiles and rebuild
#Try to use historic products
modscag_fn_list = []
#Note: not all tiles are available for same date ranges in historic vs. real-time
#Need to repeat search tile-by-tile
for tile in tile_list:
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
modscag_url_fn = []
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
#Couldn't find historic, try to use real-time products
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
print("Unable to fetch MODSCAG for %s" % dt)
else:
#OK, we got a directory listing for this tile
#Now extract the actual tif filename to fetch from the html
parsed_html = BeautifulSoup(r.content, "html.parser")
#Fetch all tiles
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if modscag_url_fn:
modscag_url_fn = modscag_url_fn[0]
modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
print(modscag_url)
modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
if not os.path.exists(modscag_fn):
iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
modscag_fn_list.append(modscag_fn)
#Mosaic tiles - currently a hack
if modscag_fn_list:
cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
cmd.extend(modscag_fn_list)
print(cmd)
subprocess.call(cmd, shell=False)
out_vrt_fn_list.append(out_vrt_fn)
return out_vrt_fn_list
def proc_modscag(fn_list, extent=None, t_srs=None):
"""Process the MODSCAG products for full date range, create composites and reproject
"""
#Use cubic spline here to improve upsampling
ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
#Create stack here - no need for most of mastack machinery, just make 3D array
#Mask values greater than 100% (clouds, bad pixels, etc)
ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
stack_count.set_fill_value(0)
stack_min = ma_stack.min(axis=0).astype(np.uint8)
stack_min.set_fill_value(0)
stack_max = ma_stack.max(axis=0).astype(np.uint8)
stack_max.set_fill_value(0)
stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
stack_med.set_fill_value(0)
out_fn = stack_fn + '_count.tif'
iolib.writeGTiff(stack_count, out_fn, ds_list[0])
out_fn = stack_fn + '_max.tif'
iolib.writeGTiff(stack_max, out_fn, ds_list[0])
out_fn = stack_fn + '_min.tif'
iolib.writeGTiff(stack_min, out_fn, ds_list[0])
out_fn = stack_fn + '_med.tif'
iolib.writeGTiff(stack_med, out_fn, ds_list[0])
ds = gdal.Open(out_fn)
return ds
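#Usage sketch (illustrative): fetch daily MODSCAG VRTs around the DEM date, then
#composite and reproject; the returned dataset is the median fSCA grid written above:
#  fn_list = get_modscag_fn_list(dem_dt, tile_list=get_modis_tile_list(dem_ds))
#  modscag_ds = proc_modscag(fn_list, extent=dem_ds, t_srs=dem_ds)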
def get_toa_fn(dem_fn):
toa_fn = None
#Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
#dem_dir = os.path.split(os.path.split(os.path.abspath(dem_fn))[0])[0]
dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
import re
#Get index of the top level pair directory containing toa (WV02_20140514_1030010031114100_1030010030896000)
r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
if r_idx:
r_idx = r_idx[0]
#Reconstruct dir
dem_dir = (os.sep).join(dem_dir_list[0:r_idx+1])
#Find toa.tif in top-level dir
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if not toa_fn:
ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
if ortho_fn:
cmd = ['toa.sh', dem_dir]
print(cmd)
subprocess.call(cmd)
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if toa_fn:
toa_fn = toa_fn[0]
else:
toa_fn = None
if toa_fn is None:
sys.exit("Unable to locate TOA dataset")
return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
toa = iolib.ds_getma(toa_ds)
toa_mask = np.ma.masked_greater(toa, toa_thresh)
#This should be 1 for valid surfaces, 0 for snow-covered surfaces
toa_mask = ~(np.ma.getmaskarray(toa_mask))
return toa_mask
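#Usage sketch (illustrative): True pixels in toa_mask are dark (likely snow-free)
#surfaces suitable for co-registration:
#  toa_ds = gdal.Open(get_toa_fn(dem_fn))
#  toa_mask = get_toa_mask(toa_ds, toa_thresh=0.4)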
def check_mask_list(mask_list):
temp = []
for m in mask_list:
if m not in mask_choices:
print("Invalid mask choice: %s" % m)
else:
temp.append(m)
return temp
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
mask_list = check_mask_list(mask_list)
if 'none' in mask_list:
newmask = False
else:
#Basename for output files
if outdir is not None:
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.path.split(dem_fn)[0]
if dem_fn is not None:
#Extract DEM timestamp
dem_dt = timelib.fn_getdatetime(dem_fn)
out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
if args is None:
#Get default values
parser = getparser()
args = parser.parse_args(['',])
newmask = True
if 'glaciers' in mask_list:
icemask = get_icemask(dem_ds)
if writeout:
out_fn = out_fn_base+'_ice_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(icemask, newmask)
#Need to process NLCD separately, with nearest neighbor interpolation
if 'nlcd' in mask_list and args.nlcd_filter != 'none':
rs = 'near'
nlcd_ds = gdal.Open(get_nlcd_fn())
nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_nlcd.tif'
nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(nlcdmask, newmask)
if 'bareground' in mask_list and args.bareground_thresh > 0:
bareground_ds = gdal.Open(get_bareground_fn())
bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_bareground.tif'
baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(baregroundmask, newmask)
if 'snodas' in mask_list and args.snodas_thresh > 0:
#Get SNODAS snow depth products for DEM timestamp
snodas_min_dt = datetime(2003,9,30)
if dem_dt >= snodas_min_dt:
snodas_ds = get_snodas_ds(dem_dt)
if snodas_ds is not None:
snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
#snow depth values are mm, convert to meters
snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
if snodas_depth.count() > 0:
print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
out_fn = None
if writeout:
out_fn = out_fn_base+'_snodas_depth.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(snodas_mask, newmask)
else:
print("SNODAS grid for input location and timestamp is empty")
#These tiles cover CONUS
#tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
if 'modscag' in mask_list and args.modscag_thresh > 0:
modscag_min_dt = datetime(2000,2,24)
if dem_dt < modscag_min_dt:
print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
% (dem_dt, modscag_min_dt))
else:
tile_list = get_modis_tile_list(dem_ds)
print(tile_list)
pad_days=7
modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
if modscag_fn_list:
modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
modscag_fsca = iolib.ds_getma(modscag_ds_warp)
out_fn = None
if writeout:
out_fn = out_fn_base+'_modscag_fsca.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
modscag_mask = ~(modscag_mask)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(modscag_mask, newmask)
#Use reflectance values to estimate snowcover
if 'toa' in mask_list:
#Use top of atmosphere scaled reflectance values (0-1)
toa_ds = gdal.Open(get_toa_fn(dem_fn))
toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
if writeout:
out_fn = out_fn_base+'_toa_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(toa_mask, newmask)
if False:
#Filter based on expected snowline
#Simplest approach uses altitude cutoff
max_elev = 1500
newdem = np.ma.masked_greater(dem, max_elev)
newmask = np.ma.getmaskarray(newdem)
print("Generating final mask to use for reference surfaces, and applying to input DEM")
#Now invert to use to create final masked array
#True (1) represents "invalid" pixel to match numpy ma convention
newmask = ~newmask
#Dilate the mask
if args.dilate is not None:
niter = args.dilate
print("Dilating mask with %i iterations" % niter)
from scipy import ndimage
newmask = ~(ndimage.morphology.binary_dilation(~newmask, iterations=niter))
return newmask
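#Usage sketch (illustrative, mirroring main() below): combine several filters and
#apply the result to the DEM; note the returned mask follows the numpy.ma convention
#(True = masked/invalid):
#  newmask = get_mask(dem_ds, ['glaciers', 'nlcd'], dem_fn=dem_fn)
#  newdem = np.ma.array(iolib.ds_getma(dem_ds), mask=newmask)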
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
parser = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
parser.add_argument('dem_fn', type=str, help='DEM filename')
parser.add_argument('--outdir', default=None, help='Directory for output products')
parser.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
#parser.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
parser.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
parser.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
parser.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
parser.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
parser.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
parser.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
parser.add_argument('--bareground', action='store_true', help="Enable bareground filter")
parser.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
parser.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
parser.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
parser.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
parser.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
return parser
def main():
parser = getparser()
args = parser.parse_args()
mask_list = []
if args.toa: mask_list.append('toa')
if args.snodas: mask_list.append('snodas')
if args.modscag: mask_list.append('modscag')
if args.bareground: mask_list.append('bareground')
if args.glaciers: mask_list.append('glaciers')
if args.nlcd: mask_list.append('nlcd')
if not mask_list:
parser.print_help()
sys.exit("Must specify at least one mask type")
#This directory should or will contain the relevant data products
#if args.datadir is None:
# datadir = iolib.get_datadir()
dem_fn = args.dem_fn
dem_ds = gdal.Open(dem_fn)
print(dem_fn)
#Get DEM masked array
dem = iolib.ds_getma(dem_ds)
print("%i valid pixels in original input tif" % dem.count())
#Set up cascading mask preparation
#True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
#Initialize the mask
#newmask = ~(np.ma.getmaskarray(dem))
newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
#Apply mask to original DEM - use these surfaces for co-registration
newdem = np.ma.array(dem, mask=newmask)
#Check that we have enough pixels, good distribution
min_validpx_count = 100
min_validpx_std = 10
validpx_count = newdem.count()
validpx_std = newdem.std()
print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
print("%0.2f std in masked output tif to be used as ref" % validpx_std)
#if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
if (validpx_count > min_validpx_count):
out_fn = os.path.join(args.outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
print("Writing out %s" % out_fn)
iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
else:
print("Not enough valid pixels!")
if __name__ == "__main__":
main()
|
dshean/demcoreg
|
demcoreg/dem_mask.py
|
get_modscag_fn_list
|
python
|
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
#Could also use global MODIS 500 m snowcover grids, 8 day
#http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
#These are HDF4, sinusoidal
#Should be able to load up with warplib without issue
import re
import requests
from bs4 import BeautifulSoup
auth = iolib.get_auth()
pad_days = timedelta(days=pad_days)
dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
outdir = os.path.join(datadir, 'modscag')
if not os.path.exists(outdir):
os.makedirs(outdir)
out_vrt_fn_list = []
for dt in dt_list:
out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
#If we already have a vrt and it contains all of the necessary tiles
if os.path.exists(out_vrt_fn):
vrt_ds = gdal.Open(out_vrt_fn)
if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
out_vrt_fn_list.append(out_vrt_fn)
continue
#Otherwise, download missing tiles and rebuild
#Try to use historic products
modscag_fn_list = []
#Note: not all tiles are available for same date ranges in historic vs. real-time
#Need to repeat search tile-by-tile
for tile in tile_list:
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
modscag_url_fn = []
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
#Couldn't find historic, try to use real-time products
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
print("Unable to fetch MODSCAG for %s" % dt)
else:
#OK, we got a directory listing for this tile
#Now extract the actual tif filename to fetch from the html
parsed_html = BeautifulSoup(r.content, "html.parser")
#Fetch all tiles
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if modscag_url_fn:
modscag_url_fn = modscag_url_fn[0]
modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
print(modscag_url)
modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
if not os.path.exists(modscag_fn):
iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
modscag_fn_list.append(modscag_fn)
#Mosaic tiles - currently a hack
if modscag_fn_list:
cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
cmd.extend(modscag_fn_list)
print(cmd)
subprocess.call(cmd, shell=False)
out_vrt_fn_list.append(out_vrt_fn)
return out_vrt_fn_list
|
Function to fetch and process MODSCAG fractional snow cover products for input datetime
Products are tiled in MODIS sinusoidal projection
example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L251-L331
| null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control location of these data files with DATADIR environmental variable
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
"""Calls external shell script `get_nlcd.sh` to fetch:
2011 Land Use Land Cover (nlcd) grids, 30 m
http://www.mrlc.gov/nlcd11_leg.php
"""
#This is original filename, which requires ~17 GB
#nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
#get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
if not os.path.exists(nlcd_fn):
cmd = ['get_nlcd.sh',]
#subprocess.call(cmd)
sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
return nlcd_fn
def get_bareground_fn():
"""Calls external shell script `get_bareground.sh` to fetch:
~2010 global bare ground, 30 m
Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)
The shell script will compress all downloaded tiles using lossless LZW compression.
http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
"""
bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
if not os.path.exists(bg_fn):
cmd = ['get_bareground.sh',]
sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return bg_fn
#Download latest global RGI glacier db
def get_glacier_poly():
"""Calls external shell script `get_rgi.sh` to fetch:
Randolph Glacier Inventory (RGI) glacier outline shapefiles
Full RGI database: rgi50.zip is 410 MB
The shell script will unzip and merge regional shp into single global shp
http://www.glims.org/RGI/
"""
#rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
#Update to rgi60, should have this returned from get_rgi.sh
rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
if not os.path.exists(rgi_fn):
cmd = ['get_rgi.sh',]
sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return rgi_fn
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
"""Generate glacier polygon raster mask for input Dataset res/extent
"""
print("Masking glaciers")
if glac_shp_fn is None:
glac_shp_fn = get_glacier_poly()
if not os.path.exists(glac_shp_fn):
print("Unable to locate glacier shp: %s" % glac_shp_fn)
else:
print("Found glacier shp: %s" % glac_shp_fn)
#All of the proj, extent, handling should now occur in shp2array
icemask = geolib.shp2array(glac_shp_fn, ds)
return icemask
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
"""Generate raster mask for specified NLCD LULC filter
"""
print("Loading NLCD LULC")
b = nlcd_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Filtering NLCD LULC with: %s" % filter)
#Original nlcd products have nan as ndv
#12 - ice
#31 - rock
#11 - open water, includes rivers
#52 - shrub, <5 m tall, >20%
#42 - evergreen forest
#Should use data dictionary here for general masking
#Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
if filter == 'rock':
mask = (l==31)
elif filter == 'rock+ice':
mask = np.logical_or((l==31),(l==12))
elif filter == 'rock+ice+water':
mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))
elif filter == 'not_forest':
mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))
elif filter == 'not_forest+not_water':
mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))
else:
print("Invalid mask type")
mask = None
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, nlcd_ds)
l = None
return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
"""Generate raster mask for exposed bare ground from global bareground data
"""
print("Loading bareground")
b = bareground_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
if bareground_thresh < 0.0 or bareground_thresh > 100.0:
sys.exit("Invalid bare ground percentage")
mask = (l>bareground_thresh)
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, bareground_ds)
l = None
return mask
def get_snodas_ds(dem_dt, code=1036):
"""Function to fetch and process SNODAS snow depth products for input datetime
http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html
Product codes:
1036 is snow depth
1034 is SWE
filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr
"""
import tarfile
import gzip
snodas_ds = None
snodas_url_str = None
outdir = os.path.join(datadir, 'snodas')
if not os.path.exists(outdir):
os.makedirs(outdir)
#Note: unmasked products (beyond CONUS) are only available from 2010-present
if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
elif dem_dt >= datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
else:
print("No SNODAS data available for input date")
if snodas_url_str is not None:
snodas_url = dem_dt.strftime(snodas_url_str)
snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
print("Unpacking")
tar = tarfile.open(snodas_tar_fn)
#Extract the gzipped dat and Hdr files from the tar archive, then gunzip each
for ext in ('dat', 'Hdr'):
tar_subfn_str = tar_subfn_str_fmt % (code, ext)
tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
tar_subfn = os.path.splitext(tar_subfn_gz)[0]
print(tar_subfn)
if outdir is not None:
tar_subfn = os.path.join(outdir, tar_subfn)
if not os.path.exists(tar_subfn):
#Should be able to do this without writing intermediate gz to disk
tar.extract(tar_subfn_gz)
with gzip.open(tar_subfn_gz, 'rb') as f:
outf = open(tar_subfn, 'wb')
outf.write(f.read())
outf.close()
os.remove(tar_subfn_gz)
#Need to delete 'Created by module comment' line from Hdr, can contain too many characters
bad_str = 'Created by module comment'
snodas_fn = tar_subfn
f = open(snodas_fn)
output = []
for line in f:
if bad_str not in line:
output.append(line)
f.close()
f = open(snodas_fn, 'w')
f.writelines(output)
f.close()
#Return GDAL dataset for extracted product
snodas_ds = gdal.Open(snodas_fn)
return snodas_ds
def get_modis_tile_list(ds):
"""Helper function to identify MODIS tiles that intersect input geometry
modis_grid.py contains a dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
"""
from demcoreg import modis_grid
modis_dict = {}
for key in modis_grid.modis_dict:
modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
geom = geolib.ds_geom(ds)
geom_dup = geolib.geom_dup(geom)
ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
geom_dup.Transform(ct)
tile_list = []
for key, val in list(modis_dict.items()):
if geom_dup.Intersects(val):
tile_list.append(key)
return tile_list
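#Usage sketch (illustrative): tile_list = get_modis_tile_list(dem_ds) returns tile
#names like 'h09v04' that can be passed directly to get_modscag_fn_list()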
def proc_modscag(fn_list, extent=None, t_srs=None):
"""Process the MODSCAG products for full date range, create composites and reproject
"""
#Use cubic spline here to improve upsampling
ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
#Create stack here - no need for most of mastack machinery, just make 3D array
#Mask values greater than 100% (clouds, bad pixels, etc)
ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
stack_count.set_fill_value(0)
stack_min = ma_stack.min(axis=0).astype(np.uint8)
stack_min.set_fill_value(0)
stack_max = ma_stack.max(axis=0).astype(np.uint8)
stack_max.set_fill_value(0)
stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
stack_med.set_fill_value(0)
out_fn = stack_fn + '_count.tif'
iolib.writeGTiff(stack_count, out_fn, ds_list[0])
out_fn = stack_fn + '_max.tif'
iolib.writeGTiff(stack_max, out_fn, ds_list[0])
out_fn = stack_fn + '_min.tif'
iolib.writeGTiff(stack_min, out_fn, ds_list[0])
out_fn = stack_fn + '_med.tif'
iolib.writeGTiff(stack_med, out_fn, ds_list[0])
ds = gdal.Open(out_fn)
return ds
def get_toa_fn(dem_fn):
toa_fn = None
#Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
#dem_dir = os.path.split(os.path.split(os.path.abspath(dem_fn))[0])[0]
dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
import re
#Get index of the top level pair directory containing toa (WV02_20140514_1030010031114100_1030010030896000)
r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
if r_idx:
r_idx = r_idx[0]
#Reconstruct dir
dem_dir = (os.sep).join(dem_dir_list[0:r_idx+1])
#Find toa.tif in top-level dir
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if not toa_fn:
ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
if ortho_fn:
cmd = ['toa.sh', dem_dir]
print(cmd)
subprocess.call(cmd)
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if toa_fn:
toa_fn = toa_fn[0]
else:
toa_fn = None
if toa_fn is None:
sys.exit("Unable to locate TOA dataset")
return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
toa = iolib.ds_getma(toa_ds)
toa_mask = np.ma.masked_greater(toa, toa_thresh)
#This should be 1 for valid surfaces, 0 for snow-covered surfaces
toa_mask = ~(np.ma.getmaskarray(toa_mask))
return toa_mask
def check_mask_list(mask_list):
temp = []
for m in mask_list:
if m not in mask_choices:
print("Invalid mask choice: %s" % m)
else:
temp.append(m)
return temp
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
mask_list = check_mask_list(mask_list)
if 'none' in mask_list:
newmask = False
else:
#Basename for output files
if outdir is not None:
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.path.split(dem_fn)[0]
if dem_fn is not None:
#Extract DEM timestamp
dem_dt = timelib.fn_getdatetime(dem_fn)
out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
if args is None:
#Get default values
parser = getparser()
args = parser.parse_args(['',])
newmask = True
if 'glaciers' in mask_list:
icemask = get_icemask(dem_ds)
if writeout:
out_fn = out_fn_base+'_ice_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(icemask, newmask)
#Need to process NLCD separately, with nearest neighbor interpolation
if 'nlcd' in mask_list and args.nlcd_filter != 'none':
rs = 'near'
nlcd_ds = gdal.Open(get_nlcd_fn())
nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_nlcd.tif'
nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(nlcdmask, newmask)
if 'bareground' in mask_list and args.bareground_thresh > 0:
bareground_ds = gdal.Open(get_bareground_fn())
bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_bareground.tif'
baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(baregroundmask, newmask)
if 'snodas' in mask_list and args.snodas_thresh > 0:
#Get SNODAS snow depth products for DEM timestamp
snodas_min_dt = datetime(2003,9,30)
if dem_dt >= snodas_min_dt:
snodas_ds = get_snodas_ds(dem_dt)
if snodas_ds is not None:
snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
#snow depth values are mm, convert to meters
snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
if snodas_depth.count() > 0:
print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
out_fn = None
if writeout:
out_fn = out_fn_base+'_snodas_depth.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(snodas_mask, newmask)
else:
print("SNODAS grid for input location and timestamp is empty")
#These tiles cover CONUS
#tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
if 'modscag' in mask_list and args.modscag_thresh > 0:
modscag_min_dt = datetime(2000,2,24)
if dem_dt < modscag_min_dt:
print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
% (dem_dt, modscag_min_dt))
else:
tile_list = get_modis_tile_list(dem_ds)
print(tile_list)
pad_days=7
modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
if modscag_fn_list:
modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
modscag_fsca = iolib.ds_getma(modscag_ds_warp)
out_fn = None
if writeout:
out_fn = out_fn_base+'_modscag_fsca.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
modscag_mask = ~(modscag_mask)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(modscag_mask, newmask)
#Use reflectance values to estimate snowcover
if 'toa' in mask_list:
#Use top of atmosphere scaled reflectance values (0-1)
toa_ds = gdal.Open(get_toa_fn(dem_fn))
toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
if writeout:
out_fn = out_fn_base+'_toa_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(toa_mask, newmask)
if False:
#Filter based on expected snowline
#Simplest approach uses altitude cutoff
max_elev = 1500
newdem = np.ma.masked_greater(dem, max_elev)
newmask = np.ma.getmaskarray(newdem)
print("Generating final mask to use for reference surfaces, and applying to input DEM")
#Now invert to use to create final masked array
#True (1) represents "invalid" pixel to match numpy ma convention
newmask = ~newmask
#Dilate the mask
if args.dilate is not None:
niter = args.dilate
print("Dilating mask with %i iterations" % niter)
from scipy import ndimage
newmask = ~(ndimage.morphology.binary_dilation(~newmask, iterations=niter))
return newmask
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
parser = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
parser.add_argument('dem_fn', type=str, help='DEM filename')
parser.add_argument('--outdir', default=None, help='Directory for output products')
parser.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
#parser.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
parser.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
parser.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
parser.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
parser.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
parser.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
parser.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
parser.add_argument('--bareground', action='store_true', help="Enable bareground filter")
parser.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
parser.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
parser.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
parser.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
parser.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
return parser
def main():
parser = getparser()
args = parser.parse_args()
mask_list = []
if args.toa: mask_list.append('toa')
if args.snodas: mask_list.append('snodas')
if args.modscag: mask_list.append('modscag')
if args.bareground: mask_list.append('bareground')
if args.glaciers: mask_list.append('glaciers')
if args.nlcd: mask_list.append('nlcd')
if not mask_list:
parser.print_help()
sys.exit("Must specify at least one mask type")
#This directory should or will contain the relevant data products
#if args.datadir is None:
# datadir = iolib.get_datadir()
dem_fn = args.dem_fn
dem_ds = gdal.Open(dem_fn)
print(dem_fn)
#Get DEM masked array
dem = iolib.ds_getma(dem_ds)
print("%i valid pixels in original input tif" % dem.count())
#Set up cascading mask preparation
#True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
#Initialize the mask
#newmask = ~(np.ma.getmaskarray(dem))
newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
#Apply mask to original DEM - use these surfaces for co-registration
newdem = np.ma.array(dem, mask=newmask)
#Check that we have enough pixels, good distribution
min_validpx_count = 100
min_validpx_std = 10
validpx_count = newdem.count()
validpx_std = newdem.std()
print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
print("%0.2f std in masked output tif to be used as ref" % validpx_std)
#if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
if (validpx_count > min_validpx_count):
out_fn = os.path.join(args.outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
print("Writing out %s" % out_fn)
iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
else:
print("Not enough valid pixels!")
if __name__ == "__main__":
main()
|
dshean/demcoreg
|
demcoreg/dem_mask.py
|
proc_modscag
|
python
|
def proc_modscag(fn_list, extent=None, t_srs=None):
#Use cubic spline here to improve upsampling
ds_list = warplib.memwarp_multi_fn(fn_list, res='min', extent=extent, t_srs=t_srs, r='cubicspline')
stack_fn = os.path.splitext(fn_list[0])[0] + '_' + os.path.splitext(os.path.split(fn_list[-1])[1])[0] + '_stack_%i' % len(fn_list)
#Create stack here - no need for most of mastack machinery, just make 3D array
#Mask values greater than 100% (clouds, bad pixels, etc)
ma_stack = np.ma.array([np.ma.masked_greater(iolib.ds_getma(ds), 100) for ds in np.array(ds_list)], dtype=np.uint8)
stack_count = np.ma.masked_equal(ma_stack.count(axis=0), 0).astype(np.uint8)
stack_count.set_fill_value(0)
stack_min = ma_stack.min(axis=0).astype(np.uint8)
stack_min.set_fill_value(0)
stack_max = ma_stack.max(axis=0).astype(np.uint8)
stack_max.set_fill_value(0)
stack_med = np.ma.median(ma_stack, axis=0).astype(np.uint8)
stack_med.set_fill_value(0)
out_fn = stack_fn + '_count.tif'
iolib.writeGTiff(stack_count, out_fn, ds_list[0])
out_fn = stack_fn + '_max.tif'
iolib.writeGTiff(stack_max, out_fn, ds_list[0])
out_fn = stack_fn + '_min.tif'
iolib.writeGTiff(stack_min, out_fn, ds_list[0])
out_fn = stack_fn + '_med.tif'
iolib.writeGTiff(stack_med, out_fn, ds_list[0])
ds = gdal.Open(out_fn)
return ds
|
Process the MODSCAG products for full date range, create composites and reproject
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/dem_mask.py#L333-L362
| null |
#! /usr/bin/env python
"""
Utility to automate reference surface identification for raster co-registration
Note: Initial run may take a long time to download and process required data (NLCD, global bareground, glacier polygons)
Can control location of these data files with DATADIR environmental variable
export DATADIR=dir
Dependencies: gdal, wget, requests, bs4
"""
#To do:
#Integrate 1-km LULC data: http://www.landcover.org/data/landcover/
#TODO: need to clean up toa handling
import sys
import os
import subprocess
import glob
import argparse
from osgeo import gdal, ogr, osr
import numpy as np
from datetime import datetime, timedelta
from pygeotools.lib import iolib, warplib, geolib, timelib
datadir = iolib.get_datadir()
def get_nlcd_fn():
"""Calls external shell script `get_nlcd.sh` to fetch:
2011 Land Use Land Cover (nlcd) grids, 30 m
http://www.mrlc.gov/nlcd11_leg.php
"""
#This is original filename, which requires ~17 GB
#nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.img')
#get_nlcd.sh now creates a compressed GTiff, which is 1.1 GB
nlcd_fn = os.path.join(datadir, 'nlcd_2011_landcover_2011_edition_2014_10_10/nlcd_2011_landcover_2011_edition_2014_10_10.tif')
if not os.path.exists(nlcd_fn):
cmd = ['get_nlcd.sh',]
#subprocess.call(cmd)
sys.exit("Missing nlcd data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
return nlcd_fn
def get_bareground_fn():
"""Calls external shell script `get_bareground.sh` to fetch:
~2010 global bare ground, 30 m
Note: unzipped file size is 64 GB! Original products are uncompressed, and tiles are available globally (including empty data over ocean)
The shell script will compress all downloaded tiles using lossless LZW compression.
http://landcover.usgs.gov/glc/BareGroundDescriptionAndDownloads.php
"""
bg_fn = os.path.join(datadir, 'bare2010/bare2010.vrt')
if not os.path.exists(bg_fn):
cmd = ['get_bareground.sh',]
sys.exit("Missing bareground data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return bg_fn
#Download latest global RGI glacier db
def get_glacier_poly():
"""Calls external shell script `get_rgi.sh` to fetch:
Randolph Glacier Inventory (RGI) glacier outline shapefiles
Full RGI database: rgi50.zip is 410 MB
The shell script will unzip and merge regional shp into single global shp
http://www.glims.org/RGI/
"""
#rgi_fn = os.path.join(datadir, 'rgi50/regions/rgi50_merge.shp')
#Update to rgi60, should have this returned from get_rgi.sh
rgi_fn = os.path.join(datadir, 'rgi60/regions/rgi60_merge.shp')
if not os.path.exists(rgi_fn):
cmd = ['get_rgi.sh',]
sys.exit("Missing rgi glacier data source. If already downloaded, specify correct datadir. If not, run `%s` to download" % cmd[0])
#subprocess.call(cmd)
return rgi_fn
#Update glacier polygons
def get_icemask(ds, glac_shp_fn=None):
"""Generate glacier polygon raster mask for input Dataset res/extent
"""
print("Masking glaciers")
if glac_shp_fn is None:
glac_shp_fn = get_glacier_poly()
if not os.path.exists(glac_shp_fn):
print("Unable to locate glacier shp: %s" % glac_shp_fn)
else:
print("Found glacier shp: %s" % glac_shp_fn)
#All of the proj, extent, handling should now occur in shp2array
icemask = geolib.shp2array(glac_shp_fn, ds)
return icemask
#Create nlcd mask
def get_nlcd_mask(nlcd_ds, filter='not_forest', out_fn=None):
"""Generate raster mask for specified NLCD LULC filter
"""
print("Loading NLCD LULC")
b = nlcd_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Filtering NLCD LULC with: %s" % filter)
#Original nlcd products have nan as ndv
#12 - ice
#31 - rock
#11 - open water, includes rivers
#52 - shrub, <5 m tall, >20%
#42 - evergreen forest
#Should use data dictionary here for general masking
#Using 'rock+ice+water' preserves the most pixels, although could be problematic over areas with lakes
if filter == 'rock':
mask = (l==31)
elif filter == 'rock+ice':
mask = np.logical_or((l==31),(l==12))
elif filter == 'rock+ice+water':
mask = np.logical_or(np.logical_or((l==31),(l==12)),(l==11))
elif filter == 'not_forest':
mask = ~(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)))
elif filter == 'not_forest+not_water':
mask = ~(np.logical_or(np.logical_or(np.logical_or((l==41),(l==42)),(l==43)),(l==11)))
else:
print("Invalid mask type")
mask = None
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, nlcd_ds)
l = None
return mask
def get_bareground_mask(bareground_ds, bareground_thresh=60, out_fn=None):
"""Generate raster mask for exposed bare ground from global bareground data
"""
print("Loading bareground")
b = bareground_ds.GetRasterBand(1)
l = b.ReadAsArray()
print("Masking pixels with <%0.1f%% bare ground" % bareground_thresh)
if bareground_thresh < 0.0 or bareground_thresh > 100.0:
sys.exit("Invalid bare ground percentage")
mask = (l>bareground_thresh)
#Write out original data
if out_fn is not None:
print("Writing out %s" % out_fn)
iolib.writeGTiff(l, out_fn, bareground_ds)
l = None
return mask
def get_snodas_ds(dem_dt, code=1036):
"""Function to fetch and process SNODAS snow depth products for input datetime
http://nsidc.org/data/docs/noaa/g02158_snodas_snow_cover_model/index.html
Product codes:
1036 is snow depth
1034 is SWE
filename format: us_ssmv11036tS__T0001TTNATS2015042205HP001.Hdr
"""
import tarfile
import gzip
snodas_ds = None
snodas_url_str = None
outdir = os.path.join(datadir, 'snodas')
if not os.path.exists(outdir):
os.makedirs(outdir)
#Note: unmasked products (beyond CONUS) are only available from 2010-present
if dem_dt >= datetime(2003,9,30) and dem_dt < datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/masked/%Y/%m_%b/SNODAS_%Y%m%d.tar'
tar_subfn_str_fmt = 'us_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
elif dem_dt >= datetime(2010,1,1):
snodas_url_str = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02158/unmasked/%Y/%m_%b/SNODAS_unmasked_%Y%m%d.tar'
tar_subfn_str_fmt = './zz_ssmv1%itS__T0001TTNATS%%Y%%m%%d05HP001.%s.gz'
else:
print("No SNODAS data available for input date")
if snodas_url_str is not None:
snodas_url = dem_dt.strftime(snodas_url_str)
snodas_tar_fn = iolib.getfile(snodas_url, outdir=outdir)
print("Unpacking")
tar = tarfile.open(snodas_tar_fn)
#Extract the gzipped dat and Hdr files from the tar archive, then gunzip each
for ext in ('dat', 'Hdr'):
tar_subfn_str = tar_subfn_str_fmt % (code, ext)
tar_subfn_gz = dem_dt.strftime(tar_subfn_str)
tar_subfn = os.path.splitext(tar_subfn_gz)[0]
print(tar_subfn)
if outdir is not None:
tar_subfn = os.path.join(outdir, tar_subfn)
if not os.path.exists(tar_subfn):
#Should be able to do this without writing intermediate gz to disk
tar.extract(tar_subfn_gz)
with gzip.open(tar_subfn_gz, 'rb') as f:
outf = open(tar_subfn, 'wb')
outf.write(f.read())
outf.close()
os.remove(tar_subfn_gz)
#Need to delete 'Created by module comment' line from Hdr, can contain too many characters
bad_str = 'Created by module comment'
snodas_fn = tar_subfn
f = open(snodas_fn)
output = []
for line in f:
if bad_str not in line:
output.append(line)
f.close()
f = open(snodas_fn, 'w')
f.writelines(output)
f.close()
#Return GDAL dataset for extracted product
snodas_ds = gdal.Open(snodas_fn)
return snodas_ds
def get_modis_tile_list(ds):
"""Helper function to identify MODIS tiles that intersect input geometry
modis_grid.py contains a dictionary of tile boundaries (tile name and WKT polygon ring from bbox)
See: https://modis-land.gsfc.nasa.gov/MODLAND_grid.html
"""
from demcoreg import modis_grid
modis_dict = {}
for key in modis_grid.modis_dict:
modis_dict[key] = ogr.CreateGeometryFromWkt(modis_grid.modis_dict[key])
geom = geolib.ds_geom(ds)
geom_dup = geolib.geom_dup(geom)
ct = osr.CoordinateTransformation(geom_dup.GetSpatialReference(), geolib.wgs_srs)
geom_dup.Transform(ct)
tile_list = []
for key, val in list(modis_dict.items()):
if geom_dup.Intersects(val):
tile_list.append(key)
return tile_list
def get_modscag_fn_list(dem_dt, tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05'), pad_days=7):
"""Function to fetch and process MODSCAG fractional snow cover products for input datetime
Products are tiled in MODIS sinusoidal projection
example url: https://snow-data.jpl.nasa.gov/modscag-historic/2015/001/MOD09GA.A2015001.h07v03.005.2015006001833.snow_fraction.tif
"""
#Could also use global MODIS 500 m snowcover grids, 8 day
#http://nsidc.org/data/docs/daac/modis_v5/mod10a2_modis_terra_snow_8-day_global_500m_grid.gd.html
#These are HDF4, sinusoidal
#Should be able to load up with warplib without issue
import re
import requests
from bs4 import BeautifulSoup
auth = iolib.get_auth()
pad_days = timedelta(days=pad_days)
dt_list = timelib.dt_range(dem_dt-pad_days, dem_dt+pad_days+timedelta(1), timedelta(1))
outdir = os.path.join(datadir, 'modscag')
if not os.path.exists(outdir):
os.makedirs(outdir)
out_vrt_fn_list = []
for dt in dt_list:
out_vrt_fn = os.path.join(outdir, dt.strftime('%Y%m%d_snow_fraction.vrt'))
#If we already have a vrt and it contains all of the necessary tiles
if os.path.exists(out_vrt_fn):
vrt_ds = gdal.Open(out_vrt_fn)
if np.all([np.any([tile in sub_fn for sub_fn in vrt_ds.GetFileList()]) for tile in tile_list]):
out_vrt_fn_list.append(out_vrt_fn)
continue
#Otherwise, download missing tiles and rebuild
#Try to use historic products
modscag_fn_list = []
#Note: not all tiles are available for same date ranges in historic vs. real-time
#Need to repeat search tile-by-tile
for tile in tile_list:
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag-historic/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
modscag_url_fn = []
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
#Couldn't find historic, try to use real-time products
modscag_url_str = 'https://snow-data.jpl.nasa.gov/modscag/%Y/%j/'
modscag_url_base = dt.strftime(modscag_url_str)
print("Trying: %s" % modscag_url_base)
r = requests.get(modscag_url_base, auth=auth)
if r.ok:
parsed_html = BeautifulSoup(r.content, "html.parser")
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if not modscag_url_fn:
print("Unable to fetch MODSCAG for %s" % dt)
else:
#OK, we got a directory listing for this tile
#Now extract the actual tif filename to fetch from the html
parsed_html = BeautifulSoup(r.content, "html.parser")
#Fetch all tiles
modscag_url_fn = parsed_html.findAll(text=re.compile('%s.*snow_fraction.tif' % tile))
if modscag_url_fn:
modscag_url_fn = modscag_url_fn[0]
modscag_url = os.path.join(modscag_url_base, modscag_url_fn)
print(modscag_url)
modscag_fn = os.path.join(outdir, os.path.split(modscag_url_fn)[-1])
if not os.path.exists(modscag_fn):
iolib.getfile2(modscag_url, auth=auth, outdir=outdir)
modscag_fn_list.append(modscag_fn)
#Mosaic tiles - currently a hack
if modscag_fn_list:
cmd = ['gdalbuildvrt', '-vrtnodata', '255', out_vrt_fn]
cmd.extend(modscag_fn_list)
print(cmd)
subprocess.call(cmd, shell=False)
out_vrt_fn_list.append(out_vrt_fn)
return out_vrt_fn_list
def get_toa_fn(dem_fn):
toa_fn = None
#Original approach, assumes DEM file is in *00/dem_*/*DEM_32m.tif
#dem_dir = os.path.split(os.path.split(os.path.abspath(dem_fn))[0])[0]
dem_dir_list = os.path.split(os.path.realpath(dem_fn))[0].split(os.sep)
import re
#Get index of the top level pair directory containing toa (WV02_20140514_1030010031114100_1030010030896000)
r_idx = [i for i, item in enumerate(dem_dir_list) if re.search('(_10)*(_10)*00$', item)]
if r_idx:
r_idx = r_idx[0]
#Reconstruct dir
dem_dir = (os.sep).join(dem_dir_list[0:r_idx+1])
#Find toa.tif in top-level dir
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if not toa_fn:
ortho_fn = glob.glob(os.path.join(dem_dir, '*ortho*.tif'))
if ortho_fn:
cmd = ['toa.sh', dem_dir]
print(cmd)
subprocess.call(cmd)
toa_fn = glob.glob(os.path.join(dem_dir, '*toa.tif'))
if toa_fn:
toa_fn = toa_fn[0]
else:
toa_fn = None
if toa_fn is None:
sys.exit("Unable to locate TOA dataset")
return toa_fn
#TOA reflectance filter
def get_toa_mask(toa_ds, toa_thresh=0.4):
print("Applying TOA filter (masking values >= %0.2f)" % toa_thresh)
toa = iolib.ds_getma(toa_ds)
toa_mask = np.ma.masked_greater(toa, toa_thresh)
#This should be 1 for valid surfaces, 0 for snow-covered surfaces
toa_mask = ~(np.ma.getmaskarray(toa_mask))
return toa_mask
def check_mask_list(mask_list):
temp = []
for m in mask_list:
if m not in mask_choices:
print("Invalid mask choice: %s" % m)
else:
temp.append(m)
return temp
def get_mask(dem_ds, mask_list, dem_fn=None, writeout=False, outdir=None, args=None):
mask_list = check_mask_list(mask_list)
if 'none' in mask_list:
newmask = False
else:
#Basename for output files
if outdir is not None:
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.path.split(dem_fn)[0]
if dem_fn is not None:
#Extract DEM timestamp
dem_dt = timelib.fn_getdatetime(dem_fn)
out_fn_base = os.path.join(outdir, os.path.splitext(dem_fn)[0])
if args is None:
#Get default values
parser = getparser()
args = parser.parse_args(['',])
newmask = True
if 'glaciers' in mask_list:
icemask = get_icemask(dem_ds)
if writeout:
out_fn = out_fn_base+'_ice_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(icemask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(icemask, newmask)
#Need to process NLCD separately, with nearest neighbor interpolation
if 'nlcd' in mask_list and args.nlcd_filter != 'none':
rs = 'near'
nlcd_ds = gdal.Open(get_nlcd_fn())
nlcd_ds_warp = warplib.memwarp_multi([nlcd_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r=rs)[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_nlcd.tif'
nlcdmask = get_nlcd_mask(nlcd_ds_warp, filter=args.nlcd_filter, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(nlcdmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(nlcdmask, newmask)
if 'bareground' in mask_list and args.bareground_thresh > 0:
bareground_ds = gdal.Open(get_bareground_fn())
bareground_ds_warp = warplib.memwarp_multi([bareground_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
out_fn = None
if writeout:
out_fn = out_fn_base+'_bareground.tif'
baregroundmask = get_bareground_mask(bareground_ds_warp, bareground_thresh=args.bareground_thresh, out_fn=out_fn)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(baregroundmask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(baregroundmask, newmask)
if 'snodas' in mask_list and args.snodas_thresh > 0:
#Get SNODAS snow depth products for DEM timestamp
snodas_min_dt = datetime(2003,9,30)
if dem_dt >= snodas_min_dt:
snodas_ds = get_snodas_ds(dem_dt)
if snodas_ds is not None:
snodas_ds_warp = warplib.memwarp_multi([snodas_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
#snow depth values are mm, convert to meters
snodas_depth = iolib.ds_getma(snodas_ds_warp)/1000.
if snodas_depth.count() > 0:
print("Applying SNODAS snow depth filter (masking values >= %0.2f m)" % args.snodas_thresh)
out_fn = None
if writeout:
out_fn = out_fn_base+'_snodas_depth.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_depth, out_fn, src_ds=dem_ds)
snodas_mask = np.ma.masked_greater(snodas_depth, args.snodas_thresh)
snodas_mask = ~(np.ma.getmaskarray(snodas_mask))
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(snodas_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(snodas_mask, newmask)
else:
print("SNODAS grid for input location and timestamp is empty")
#These tiles cover CONUS
#tile_list=('h08v04', 'h09v04', 'h10v04', 'h08v05', 'h09v05')
if 'modscag' in mask_list and args.modscag_thresh > 0:
modscag_min_dt = datetime(2000,2,24)
if dem_dt < modscag_min_dt:
print("Warning: DEM timestamp (%s) is before earliest MODSCAG timestamp (%s)" \
% (dem_dt, modscag_min_dt))
else:
tile_list = get_modis_tile_list(dem_ds)
print(tile_list)
pad_days=7
modscag_fn_list = get_modscag_fn_list(dem_dt, tile_list=tile_list, pad_days=pad_days)
if modscag_fn_list:
modscag_ds = proc_modscag(modscag_fn_list, extent=dem_ds, t_srs=dem_ds)
modscag_ds_warp = warplib.memwarp_multi([modscag_ds,], res=dem_ds, extent=dem_ds, t_srs=dem_ds, r='cubicspline')[0]
print("Applying MODSCAG fractional snow cover percent filter (masking values >= %0.1f%%)" % args.modscag_thresh)
modscag_fsca = iolib.ds_getma(modscag_ds_warp)
out_fn = None
if writeout:
out_fn = out_fn_base+'_modscag_fsca.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_fsca, out_fn, src_ds=dem_ds)
modscag_mask = (modscag_fsca.filled(0) >= args.modscag_thresh)
modscag_mask = ~(modscag_mask)
if writeout:
out_fn = os.path.splitext(out_fn)[0]+'_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(modscag_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(modscag_mask, newmask)
#Use reflectance values to estimate snowcover
if 'toa' in mask_list:
#Use top of atmosphere scaled reflectance values (0-1)
toa_ds = gdal.Open(get_toa_fn(dem_fn))
toa_mask = get_toa_mask(toa_ds, args.toa_thresh)
if writeout:
out_fn = out_fn_base+'_toa_mask.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(toa_mask, out_fn, src_ds=dem_ds)
newmask = np.logical_and(toa_mask, newmask)
if False:
#Filter based on expected snowline
#Simplest approach uses altitude cutoff
max_elev = 1500
newdem = np.ma.masked_greater(dem, max_elev)
newmask = np.ma.getmaskarray(newdem)
print("Generating final mask to use for reference surfaces, and applying to input DEM")
#Now invert to use to create final masked array
    #True (1) represents "invalid" pixel to match numpy ma convention
newmask = ~newmask
#Dilate the mask
if args.dilate is not None:
niter = args.dilate
print("Dilating mask with %i iterations" % niter)
from scipy import ndimage
newmask = ~(ndimage.morphology.binary_dilation(~newmask, iterations=niter))
return newmask
#Can add "mask_list" argument, instead of specifying individually
mask_choices = ['toa', 'snodas', 'modscag', 'bareground', 'glaciers', 'nlcd', 'none']
def getparser():
parser = argparse.ArgumentParser(description="Identify control surfaces for DEM co-registration")
parser.add_argument('dem_fn', type=str, help='DEM filename')
parser.add_argument('--outdir', default=None, help='Directory for output products')
parser.add_argument('--writeout', action='store_true', help='Write out all intermediate products, instead of only final tif')
#parser.add_argument('-datadir', default=None, help='Data directory containing reference data sources (NLCD, bareground, etc)')
parser.add_argument('--toa', action='store_true', help='Use top-of-atmosphere reflectance values (requires pregenerated "dem_fn_toa.tif")')
parser.add_argument('--toa_thresh', type=float, default=0.4, help='Top-of-atmosphere reflectance threshold (default: %(default)s, valid range 0.0-1.0), mask values greater than this value')
parser.add_argument('--snodas', action='store_true', help='Use SNODAS snow depth products')
parser.add_argument('--snodas_thresh', type=float, default=0.2, help='SNODAS snow depth threshold (default: %(default)s m), mask values greater than this value')
parser.add_argument('--modscag', action='store_true', help='Use MODSCAG fractional snow cover products')
parser.add_argument('--modscag_thresh', type=float, default=50, help='MODSCAG fractional snow cover percent threshold (default: %(default)s%%, valid range 0-100), mask greater than this value')
parser.add_argument('--bareground', action='store_true', help="Enable bareground filter")
parser.add_argument('--bareground_thresh', type=float, default=60, help='Percent bareground threshold (default: %(default)s%%, valid range 0-100), mask greater than this value (only relevant for global bareground data)')
parser.add_argument('--glaciers', action='store_true', help="Mask glacier polygons")
parser.add_argument('--nlcd', action='store_true', help="Enable NLCD LULC filter (for CONUS)")
nlcd_filter_choices = ['rock', 'rock+ice', 'rock+ice+water', 'not_forest', 'not_forest+not_water', 'none']
parser.add_argument('--nlcd_filter', type=str, default='not_forest', choices=nlcd_filter_choices, help='Preserve these NLCD pixels (default: %(default)s)')
parser.add_argument('--dilate', type=int, default=None, help='Dilate mask with this many iterations (default: %(default)s)')
return parser
def main():
parser = getparser()
args = parser.parse_args()
mask_list = []
if args.toa: mask_list.append('toa')
if args.snodas: mask_list.append('snodas')
if args.modscag: mask_list.append('modscag')
if args.bareground: mask_list.append('bareground')
if args.glaciers: mask_list.append('glaciers')
if args.nlcd: mask_list.append('nlcd')
if not mask_list:
parser.print_help()
sys.exit("Must specify at least one mask type")
#This directory should or will contain the relevant data products
#if args.datadir is None:
# datadir = iolib.get_datadir()
dem_fn = args.dem_fn
dem_ds = gdal.Open(dem_fn)
print(dem_fn)
#Get DEM masked array
dem = iolib.ds_getma(dem_ds)
print("%i valid pixels in original input tif" % dem.count())
#Set up cascading mask preparation
#True (1) represents "valid" unmasked pixel, False (0) represents "invalid" pixel to be masked
#Initialize the mask
#newmask = ~(np.ma.getmaskarray(dem))
newmask = get_mask(dem_ds, mask_list, dem_fn=dem_fn, writeout=args.writeout, outdir=args.outdir, args=args)
#Apply mask to original DEM - use these surfaces for co-registration
newdem = np.ma.array(dem, mask=newmask)
#Check that we have enough pixels, good distribution
min_validpx_count = 100
min_validpx_std = 10
validpx_count = newdem.count()
validpx_std = newdem.std()
print("%i valid pixels in masked output tif to be used as ref" % validpx_count)
print("%0.2f std in masked output tif to be used as ref" % validpx_std)
#if (validpx_count > min_validpx_count) and (validpx_std > min_validpx_std):
    if (validpx_count > min_validpx_count):
        if args.outdir is not None:
            out_fn = os.path.join(args.outdir, os.path.splitext(dem_fn)[0]+'_ref.tif')
        else:
            out_fn = os.path.splitext(dem_fn)[0]+'_ref.tif'
print("Writing out %s" % out_fn)
iolib.writeGTiff(newdem, out_fn, src_ds=dem_ds)
else:
print("Not enough valid pixels!")
if __name__ == "__main__":
main()
|
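For context, a minimal sketch of driving the masking workflow above from Python rather than the CLI. This is a hedged sketch, not the package's documented API surface: it assumes demcoreg and its pygeotools dependencies are importable, and the input filename 'my_dem.tif' and the chosen mask list are hypothetical.

#Hedged sketch: reproduce the core of main() above without argparse
from osgeo import gdal
import numpy as np
from pygeotools.lib import iolib
from demcoreg import dem_mask

dem_fn = 'my_dem.tif'  #hypothetical input DEM filename
dem_ds = gdal.Open(dem_fn)
#get_mask returns True (1) for pixels to exclude, matching the numpy.ma convention above
newmask = dem_mask.get_mask(dem_ds, ['glaciers', 'nlcd'], dem_fn=dem_fn)
dem = iolib.ds_getma(dem_ds)
dem_ref = np.ma.array(dem, mask=newmask)
print("%i valid control-surface pixels" % dem_ref.count())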
dshean/demcoreg
|
demcoreg/coreglib.py
|
apply_xy_shift
|
python
|
def apply_xy_shift(ds, dx, dy, createcopy=True):
print("X shift: ", dx)
print("Y shift: ", dy)
#Update geotransform
gt_orig = ds.GetGeoTransform()
gt_shift = np.copy(gt_orig)
gt_shift[0] += dx
gt_shift[3] += dy
print("Original geotransform:", gt_orig)
print("Updated geotransform:", gt_shift)
#Update ds Geotransform
if createcopy:
ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
else:
#Update in place, assume ds is opened as GA_Update
ds_align = ds
ds_align.SetGeoTransform(gt_shift)
return ds_align
|
Apply horizontal shift to GDAL dataset GeoTransform
Returns:
GDAL Dataset copy with updated GeoTransform
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/coreglib.py#L14-L40
| null |
#! /usr/bin/env python
"""
Library of functions that can be used for co-registration of raster data
For many situations, ASP pc_align ICP co-registration is superior to these approaches. See pc_align_wrapper.sh
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from pygeotools.lib import malib, iolib
def apply_xy_shift(ds, dx, dy, createcopy=True):
"""
Apply horizontal shift to GDAL dataset GeoTransform
Returns:
GDAL Dataset copy with updated GeoTransform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
#Update geotransform
gt_orig = ds.GetGeoTransform()
gt_shift = np.copy(gt_orig)
gt_shift[0] += dx
gt_shift[3] += dy
print("Original geotransform:", gt_orig)
print("Updated geotransform:", gt_shift)
#Update ds Geotransform
if createcopy:
ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
else:
#Update in place, assume ds is opened as GA_Update
ds_align = ds
ds_align.SetGeoTransform(gt_shift)
return ds_align
def apply_z_shift(ds, dz, createcopy=True):
if isinstance(dz, np.ndarray):
print("Z shift offset array mean: ", dz.mean())
else:
print("Z shift offset: ", dz)
if createcopy:
ds_shift = iolib.mem_drv.CreateCopy('', ds, 0)
else:
ds_shift = ds
b = ds_shift.GetRasterBand(1)
a = iolib.b_getma(b)
a += dz
b.WriteArray(a.filled())
return ds_shift
#Function for fitting Nuth and Kaab (2011)
def nuth_func(x, a, b, c):
y = a * np.cos(np.deg2rad(b-x)) + c
#Can use Phasor addition, but need to change conversion to offset dx and dy
#https://stackoverflow.com/questions/12397412/i-know-scipy-curve-fit-can-do-better?rq=1
#y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c
return y
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
"""Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method
"""
#This defines the search window size
#Use half-pixel stride?
#Note: stride is not properly implemented
#stride = 1
#ref = dem1[::stride,::stride]
#kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
#Want to pad evenly on both sides, so add +1 here
m = np.zeros((pad[0]*2+1, pad[1]*2+1))
#Find integer pixel offset
i = j = 0
for i in range(m.shape[0]):
print(i)
for j in range(m.shape[1]):
print(j)
ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
diff = ref - kernel
#Remove outliers beyond IQR
diff_iqr = malib.calcperc(diff, (25,75))
diff = np.ma.masked_outside(diff, *diff_iqr)
"""
diff_med = np.ma.median(diff)
diff_mad = malib.mad(diff)
            diff_madr = (diff_med - diff_mad, diff_med + diff_mad)
            diff = np.ma.masked_outside(diff, *diff_madr)
"""
#Masked areas will decrease sum! Normalize by count of valid pixels
m[i,j] = np.ma.abs(diff).sum()/diff.count()
#Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
m = -m
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax - pad
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
sp_offset = sp_argmax - pad
if plot:
plt.figure()
plt.title('Sum of Absolute Differences')
plt.imshow(m)
plt.scatter(*sp_argmax[::-1])
#plt.show()
return m, int_offset, sp_offset
#This is a decent full-image normalized cross-correlation routine with sub-pixel refinement
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
"""Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method
"""
#Apply edge detection filter up front - improves results when input DEMs are same resolution
if prefilter:
print("Applying LoG edge-detection filter to DEMs")
sigma = 1
import scipy.ndimage
        #Note, ndimage alone propagates NaNs and greatly reduces valid data area
#Use the malib.nanfill wrapper to avoid this
dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)
import scipy.signal
#Compute max offset given dem spatial resolution
    #Should implement arbitrary x and y search space
#xsearch = (20, 41)
#ysearch = (-10, 1)
stride = 1
ref = dem1[::stride,::stride]
    kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
#kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]
#Normalize
ref = (ref - ref.mean()) / ref.std()
kernel = (kernel - kernel.mean()) / kernel.std()
#Consider using astropy.convolve here instead of scipy.correlate?
print("Adding random noise to masked regions")
#Generate random noise to fill gaps before correlation in frequency domain
#Normal distribution N(mean, std^2)
#ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
#kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
#This provides noise in proper range, but noise propagates to m, peak is in different locations!
#ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
#kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
#This provides a proper normal distribution with mean=0 and std=1
ref_noise = ref.mask * (np.random.randn(*ref.shape))
kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
#Add the noise
ref = ref.filled(0) + ref_noise
kernel = kernel.filled(0) + kernel_noise
print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
m = scipy.signal.correlate2d(ref, kernel, 'valid')
#This has memory issues, but ndimage filters can handle nan
#m = scipy.ndimage.filters.correlate(ref, kernel)
print("Computing sub-pixel peak")
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax*stride - pad
#int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
print(m.argmax())
print(m.shape)
print(int_argmax)
print(int_offset)
#Find sub-pixel peak
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
    #May need to split this into integer and decimal components, multiply stride*int and add decimal
#sp_offset = int_offset + (sp_argmax - int_argmax)
sp_offset = sp_argmax - pad
#sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
print(sp_argmax)
print(sp_offset)
if plot:
fig, ax = plt.subplots()
ax.set_title('NCC offset, parabolic SPR')
ax.imshow(m)
#plt.scatter(*int_argmax[::-1])
ax.scatter(*sp_argmax[::-1])
else:
fig = None
return m, int_offset, sp_offset, fig
#This is the Nuth and Kaab (2011) method
def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True):
"""Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method
"""
import scipy.optimize as optimization
if dh.count() < min_count:
sys.exit("Not enough dh samples")
if slope.count() < min_count:
sys.exit("Not enough slope/aspect samples")
#mean_dh = dh.mean()
#mean_slope = slope.mean()
#c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope)))
med_dh = malib.fast_median(dh)
med_slope = malib.fast_median(slope)
c_seed = (med_dh/np.tan(np.deg2rad(med_slope)))
x0 = np.array([0.0, 0.0, c_seed])
print("Computing common mask")
common_mask = ~(malib.common_mask([dh, aspect, slope]))
#Prepare x and y data
xdata = aspect[common_mask].data
ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data
print("Initial sample count:")
print(ydata.size)
if remove_outliers:
print("Removing outliers")
#print("Absolute dz filter: %0.2f" % max_dz)
#diff = np.ma.masked_greater(diff, max_dz)
#print(diff.count())
#Outlier dz filter
f = 3
sigma, u = (ydata.std(), ydata.mean())
#sigma, u = malib.mad(ydata, return_med=True)
rmin = u - f*sigma
rmax = u + f*sigma
print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax))
idx = (ydata >= rmin) & (ydata <= rmax)
xdata = xdata[idx]
ydata = ydata[idx]
print(ydata.size)
#Generate synthetic data to test curve_fit
#xdata = np.arange(0,360,0.01)
#ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))
#Limit sample size
#n = 10000
#idx = random.sample(range(xdata.size), n)
#xdata = xdata[idx]
#ydata = ydata[idx]
#Compute robust statistics for 1-degree bins
nbins = 360
bin_range = (0., 360.)
bin_width = 1.0
bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range)
bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range)
#Needed to estimate sigma for weighted lsq
#bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range)
#Started implementing this for more generic binning, needs testing
#bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \
# xlim=bin_range, nbins=(nbins, nbins), stat='count')
"""
#Mask bins in grid directions, can potentially contain biased stats
#Especially true for SGM algorithm
#badbins = [0, 90, 180, 270, 360]
badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
"""
#Remove any bins with only a few points
min_bin_sample_count = 9
idx = (bin_count.filled(0) >= min_bin_sample_count)
bin_count = bin_count[idx].data
bin_med = bin_med[idx].data
#bin_mad = bin_mad[idx].data
bin_centers = bin_centers[idx]
fit = None
fit_fig = None
#Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit
#Need at least 3 valid bins to fit 3 parameters in nuth_func
#min_bin_count = 3
min_bin_count = 90
#Not going to help if we have a step function between two plateaus, but better than nothing
#Calculate bin aspect spread
bin_ptp = np.cos(np.radians(bin_centers)).ptp()
min_bin_ptp = 1.0
#Should iterate here, if not enough bins, increase bin width
if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp:
print("Computing fit")
#Unweighted fit
fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]
#Weight by observed spread in each bin
#sigma = bin_mad
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0]
#Weight by bin count
#sigma = bin_count.max()/bin_count
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0]
print(fit)
if plot:
print("Generating Nuth and Kaab plot")
bin_idx = np.digitize(xdata, bin_edges)
output = []
for i in np.arange(1, len(bin_edges)):
output.append(ydata[bin_idx==i])
#flierprops={'marker':'.'}
lw = 0.25
whiskerprops={'linewidth':lw}
capprops={'linewidth':lw}
boxprops={'facecolor':'k', 'linewidth':0}
medianprops={'marker':'o', 'ms':1, 'color':'r'}
fit_fig, ax = plt.subplots(figsize=(6,6))
#widths = (bin_width/2.0)
widths = 2.5*(bin_count/bin_count.max())
#widths = bin_count/np.percentile(bin_count, 50)
#Stride
s=3
#This is inefficient, but we have list of arrays with different length, need to filter
        #Redundant with earlier filter, should refactor
bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \
patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \
medianprops=medianprops)
bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360]
ax.set_xticks(bin_ticks)
ax.set_xticklabels(bin_ticks)
"""
#Can pull out medians from boxplot
#We are computing multiple times, inefficient
bp_bin_med = []
for medline in bp['medians']:
bp_bin_med.append(medline.get_ydata()[0])
"""
#Plot the fit
f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2])
nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
ax.plot(bin_centers, f_a, 'b', label=nuth_func_str)
ax.set_xlabel('Aspect (deg)')
ax.set_ylabel('dh/tan(slope) (m)')
ax.axhline(color='gray', linewidth=0.5)
ax.set_xlim(*bin_range)
ylim = ax.get_ylim()
abs_ylim = np.max(np.abs(ylim))
#abs_ylim = np.max(np.abs([ydata.min(), ydata.max()]))
#pad = 0.2 * abs_ylim
pad = 0
ylim = (-abs_ylim - pad, abs_ylim + pad)
minylim = (-10,10)
if ylim[0] > minylim[0]:
ylim = minylim
ax.set_ylim(*ylim)
ax.legend(prop={'size':8})
return fit, fit_fig
#Attempt to fit polynomial functions to along-track and cross-track signals
#See demtools for existing code
def fit_at_ct():
#Derive from image corners in projected array
#Use known orbintal inclinations, project wgs geometry into srs
img1_inc
img2_inc
#Rotate
#Stats for rows, cols
#Fit
#Function copied from openPIV pyprocess
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
"""
ind = corr.argmax()
s = corr.shape[1]
i = ind // s
j = ind % s
return i, j, corr.max()
#Function copied from openPIV pyprocess
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
"""
Find subpixel approximation of the correlation peak.
    This function returns a sub-pixel approximation of the correlation
peak by using one of the several methods available. If requested,
the function also returns the signal to noise ratio level evaluated
from the correlation map.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
Original code from openPIV pyprocess
"""
# initialization
default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)
# the peak locations
peak1_i, peak1_j, dummy = find_first_peak(corr)
try:
# the peak and its neighbours: left, right, down, up
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i-1, peak1_j]
cr = corr[peak1_i+1, peak1_j]
cd = corr[peak1_i, peak1_j-1]
cu = corr[peak1_i, peak1_j+1]
# gaussian fit
if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position[0], subp_peak_position[1]
|
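A short usage sketch for apply_xy_shift, showing the pixel-to-map-unit conversion via the GDAL geotransform. The filename and the pixel offsets are hypothetical placeholders; in practice the offsets would come from one of the correlation routines in the records below.

#Hedged sketch: convert a pixel offset to map units and apply it with apply_xy_shift
from osgeo import gdal
from demcoreg import coreglib

src_ds = gdal.Open('src_dem.tif')  #hypothetical filename
gt = src_ds.GetGeoTransform()
#Hypothetical (col, row) pixel offsets
dx_px, dy_px = 1.5, -0.75
#gt[1] is the x pixel size; gt[5] is the y pixel size (negative for north-up rasters)
dx_m = dx_px * gt[1]
dy_m = dy_px * gt[5]
shifted_ds = coreglib.apply_xy_shift(src_ds, dx_m, dy_m)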
dshean/demcoreg
|
demcoreg/coreglib.py
|
compute_offset_sad
|
python
|
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
#This defines the search window size
#Use half-pixel stride?
#Note: stride is not properly implemented
#stride = 1
#ref = dem1[::stride,::stride]
#kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
#Want to pad evenly on both sides, so add +1 here
m = np.zeros((pad[0]*2+1, pad[1]*2+1))
#Find integer pixel offset
i = j = 0
for i in range(m.shape[0]):
print(i)
for j in range(m.shape[1]):
print(j)
ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
diff = ref - kernel
#Remove outliers beyond IQR
diff_iqr = malib.calcperc(diff, (25,75))
diff = np.ma.masked_outside(diff, *diff_iqr)
"""
diff_med = np.ma.median(diff)
diff_mad = malib.mad(diff)
            diff_madr = (diff_med - diff_mad, diff_med + diff_mad)
            diff = np.ma.masked_outside(diff, *diff_madr)
"""
#Masked areas will decrease sum! Normalize by count of valid pixels
m[i,j] = np.ma.abs(diff).sum()/diff.count()
#Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
m = -m
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax - pad
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
sp_offset = sp_argmax - pad
if plot:
plt.figure()
plt.title('Sum of Absolute Differences')
plt.imshow(m)
plt.scatter(*sp_argmax[::-1])
#plt.show()
return m, int_offset, sp_offset
|
Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/coreglib.py#L65-L115
|
[
"def find_subpixel_peak_position(corr, subpixel_method='gaussian'):\n \"\"\"\n Find subpixel approximation of the correlation peak.\n\n This function returns a subpixels approximation of the correlation\n peak by using one of the several methods available. If requested, \n the function also returns the signal to noise ratio level evaluated \n from the correlation map.\n\n Parameters\n ----------\n corr : np.ndarray\n the correlation map.\n\n subpixel_method : string\n one of the following methods to estimate subpixel location of the peak: \n 'centroid' [replaces default if correlation map is negative], \n 'gaussian' [default if correlation map is positive], \n 'parabolic'.\n\n Returns\n -------\n subp_peak_position : two elements tuple\n the fractional row and column indices for the sub-pixel\n approximation of the correlation peak.\n\n Original code from openPIV pyprocess\n\n \"\"\"\n # initialization\n default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)\n\n # the peak locations\n peak1_i, peak1_j, dummy = find_first_peak(corr)\n\n try:\n # the peak and its neighbours: left, right, down, up\n c = corr[peak1_i, peak1_j]\n cl = corr[peak1_i-1, peak1_j]\n cr = corr[peak1_i+1, peak1_j]\n cd = corr[peak1_i, peak1_j-1] \n cu = corr[peak1_i, peak1_j+1]\n\n # gaussian fit\n if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':\n subpixel_method = 'centroid'\n\n try: \n if subpixel_method == 'centroid':\n subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),\n ((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))\n\n elif subpixel_method == 'gaussian':\n subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),\n peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu)))) \n\n elif subpixel_method == 'parabolic':\n subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),\n peak1_j + (cd-cu)/(2*cd-4*c+2*cu)) \n\n except: \n subp_peak_position = default_peak_position\n\n except IndexError:\n subp_peak_position = default_peak_position\n\n return subp_peak_position[0], subp_peak_position[1]\n"
] |
#! /usr/bin/env python
"""
Library of functions that can be used for co-registration of raster data
For many situations, ASP pc_align ICP co-registration is superior to these approaches. See pc_align_wrapper.sh
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from pygeotools.lib import malib, iolib
def apply_xy_shift(ds, dx, dy, createcopy=True):
"""
Apply horizontal shift to GDAL dataset GeoTransform
Returns:
GDAL Dataset copy with updated GeoTransform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
#Update geotransform
gt_orig = ds.GetGeoTransform()
gt_shift = np.copy(gt_orig)
gt_shift[0] += dx
gt_shift[3] += dy
print("Original geotransform:", gt_orig)
print("Updated geotransform:", gt_shift)
#Update ds Geotransform
if createcopy:
ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
else:
#Update in place, assume ds is opened as GA_Update
ds_align = ds
ds_align.SetGeoTransform(gt_shift)
return ds_align
def apply_z_shift(ds, dz, createcopy=True):
if isinstance(dz, np.ndarray):
print("Z shift offset array mean: ", dz.mean())
else:
print("Z shift offset: ", dz)
if createcopy:
ds_shift = iolib.mem_drv.CreateCopy('', ds, 0)
else:
ds_shift = ds
b = ds_shift.GetRasterBand(1)
a = iolib.b_getma(b)
a += dz
b.WriteArray(a.filled())
return ds_shift
#Function for fitting Nuth and Kaab (2011)
def nuth_func(x, a, b, c):
y = a * np.cos(np.deg2rad(b-x)) + c
#Can use Phasor addition, but need to change conversion to offset dx and dy
#https://stackoverflow.com/questions/12397412/i-know-scipy-curve-fit-can-do-better?rq=1
#y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c
return y
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
"""Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method
"""
#This defines the search window size
#Use half-pixel stride?
#Note: stride is not properly implemented
#stride = 1
#ref = dem1[::stride,::stride]
#kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
#Want to pad evenly on both sides, so add +1 here
m = np.zeros((pad[0]*2+1, pad[1]*2+1))
#Find integer pixel offset
i = j = 0
for i in range(m.shape[0]):
print(i)
for j in range(m.shape[1]):
print(j)
ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
diff = ref - kernel
#Remove outliers beyond IQR
diff_iqr = malib.calcperc(diff, (25,75))
diff = np.ma.masked_outside(diff, *diff_iqr)
"""
diff_med = np.ma.median(diff)
diff_mad = malib.mad(diff)
            diff_madr = (diff_med - diff_mad, diff_med + diff_mad)
            diff = np.ma.masked_outside(diff, *diff_madr)
"""
#Masked areas will decrease sum! Normalize by count of valid pixels
m[i,j] = np.ma.abs(diff).sum()/diff.count()
#Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
m = -m
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax - pad
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
sp_offset = sp_argmax - pad
if plot:
plt.figure()
plt.title('Sum of Absolute Differences')
plt.imshow(m)
plt.scatter(*sp_argmax[::-1])
#plt.show()
return m, int_offset, sp_offset
#This is a decent full-image normalized cross-correlation routine with sub-pixel refinement
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
"""Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method
"""
#Apply edge detection filter up front - improves results when input DEMs are same resolution
if prefilter:
print("Applying LoG edge-detection filter to DEMs")
sigma = 1
import scipy.ndimage
        #Note, ndimage alone propagates NaNs and greatly reduces valid data area
#Use the malib.nanfill wrapper to avoid this
dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)
import scipy.signal
#Compute max offset given dem spatial resolution
    #Should implement arbitrary x and y search space
#xsearch = (20, 41)
#ysearch = (-10, 1)
stride = 1
ref = dem1[::stride,::stride]
    kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
#kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]
#Normalize
ref = (ref - ref.mean()) / ref.std()
kernel = (kernel - kernel.mean()) / kernel.std()
#Consider using astropy.convolve here instead of scipy.correlate?
print("Adding random noise to masked regions")
#Generate random noise to fill gaps before correlation in frequency domain
#Normal distribution N(mean, std^2)
#ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
#kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
#This provides noise in proper range, but noise propagates to m, peak is in different locations!
#ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
#kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
#This provides a proper normal distribution with mean=0 and std=1
ref_noise = ref.mask * (np.random.randn(*ref.shape))
kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
#Add the noise
ref = ref.filled(0) + ref_noise
kernel = kernel.filled(0) + kernel_noise
print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
m = scipy.signal.correlate2d(ref, kernel, 'valid')
#This has memory issues, but ndimage filters can handle nan
#m = scipy.ndimage.filters.correlate(ref, kernel)
print("Computing sub-pixel peak")
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax*stride - pad
#int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
print(m.argmax())
print(m.shape)
print(int_argmax)
print(int_offset)
#Find sub-pixel peak
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
    #May need to split this into integer and decimal components, multiply stride*int and add decimal
#sp_offset = int_offset + (sp_argmax - int_argmax)
sp_offset = sp_argmax - pad
#sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
print(sp_argmax)
print(sp_offset)
if plot:
fig, ax = plt.subplots()
ax.set_title('NCC offset, parabolic SPR')
ax.imshow(m)
#plt.scatter(*int_argmax[::-1])
ax.scatter(*sp_argmax[::-1])
else:
fig = None
return m, int_offset, sp_offset, fig
#This is the Nuth and Kaab (2011) method
def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True):
"""Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method
"""
import scipy.optimize as optimization
if dh.count() < min_count:
sys.exit("Not enough dh samples")
if slope.count() < min_count:
sys.exit("Not enough slope/aspect samples")
#mean_dh = dh.mean()
#mean_slope = slope.mean()
#c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope)))
med_dh = malib.fast_median(dh)
med_slope = malib.fast_median(slope)
c_seed = (med_dh/np.tan(np.deg2rad(med_slope)))
x0 = np.array([0.0, 0.0, c_seed])
print("Computing common mask")
common_mask = ~(malib.common_mask([dh, aspect, slope]))
#Prepare x and y data
xdata = aspect[common_mask].data
ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data
print("Initial sample count:")
print(ydata.size)
if remove_outliers:
print("Removing outliers")
#print("Absolute dz filter: %0.2f" % max_dz)
#diff = np.ma.masked_greater(diff, max_dz)
#print(diff.count())
#Outlier dz filter
f = 3
sigma, u = (ydata.std(), ydata.mean())
#sigma, u = malib.mad(ydata, return_med=True)
rmin = u - f*sigma
rmax = u + f*sigma
print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax))
idx = (ydata >= rmin) & (ydata <= rmax)
xdata = xdata[idx]
ydata = ydata[idx]
print(ydata.size)
#Generate synthetic data to test curve_fit
#xdata = np.arange(0,360,0.01)
#ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))
#Limit sample size
#n = 10000
#idx = random.sample(range(xdata.size), n)
#xdata = xdata[idx]
#ydata = ydata[idx]
#Compute robust statistics for 1-degree bins
nbins = 360
bin_range = (0., 360.)
bin_width = 1.0
bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range)
bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range)
#Needed to estimate sigma for weighted lsq
#bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range)
#Started implementing this for more generic binning, needs testing
#bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \
# xlim=bin_range, nbins=(nbins, nbins), stat='count')
"""
#Mask bins in grid directions, can potentially contain biased stats
#Especially true for SGM algorithm
#badbins = [0, 90, 180, 270, 360]
badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
"""
#Remove any bins with only a few points
min_bin_sample_count = 9
idx = (bin_count.filled(0) >= min_bin_sample_count)
bin_count = bin_count[idx].data
bin_med = bin_med[idx].data
#bin_mad = bin_mad[idx].data
bin_centers = bin_centers[idx]
fit = None
fit_fig = None
#Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit
#Need at least 3 valid bins to fit 3 parameters in nuth_func
#min_bin_count = 3
min_bin_count = 90
#Not going to help if we have a step function between two plateaus, but better than nothing
#Calculate bin aspect spread
bin_ptp = np.cos(np.radians(bin_centers)).ptp()
min_bin_ptp = 1.0
#Should iterate here, if not enough bins, increase bin width
if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp:
print("Computing fit")
#Unweighted fit
fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]
#Weight by observed spread in each bin
#sigma = bin_mad
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0]
#Weight by bin count
#sigma = bin_count.max()/bin_count
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0]
print(fit)
if plot:
print("Generating Nuth and Kaab plot")
bin_idx = np.digitize(xdata, bin_edges)
output = []
for i in np.arange(1, len(bin_edges)):
output.append(ydata[bin_idx==i])
#flierprops={'marker':'.'}
lw = 0.25
whiskerprops={'linewidth':lw}
capprops={'linewidth':lw}
boxprops={'facecolor':'k', 'linewidth':0}
medianprops={'marker':'o', 'ms':1, 'color':'r'}
fit_fig, ax = plt.subplots(figsize=(6,6))
#widths = (bin_width/2.0)
widths = 2.5*(bin_count/bin_count.max())
#widths = bin_count/np.percentile(bin_count, 50)
#Stride
s=3
#This is inefficient, but we have list of arrays with different length, need to filter
        #Redundant with earlier filter, should refactor
bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \
patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \
medianprops=medianprops)
bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360]
ax.set_xticks(bin_ticks)
ax.set_xticklabels(bin_ticks)
"""
#Can pull out medians from boxplot
#We are computing multiple times, inefficient
bp_bin_med = []
for medline in bp['medians']:
bp_bin_med.append(medline.get_ydata()[0])
"""
#Plot the fit
f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2])
nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
ax.plot(bin_centers, f_a, 'b', label=nuth_func_str)
ax.set_xlabel('Aspect (deg)')
ax.set_ylabel('dh/tan(slope) (m)')
ax.axhline(color='gray', linewidth=0.5)
ax.set_xlim(*bin_range)
ylim = ax.get_ylim()
abs_ylim = np.max(np.abs(ylim))
#abs_ylim = np.max(np.abs([ydata.min(), ydata.max()]))
#pad = 0.2 * abs_ylim
pad = 0
ylim = (-abs_ylim - pad, abs_ylim + pad)
minylim = (-10,10)
if ylim[0] > minylim[0]:
ylim = minylim
ax.set_ylim(*ylim)
ax.legend(prop={'size':8})
return fit, fit_fig
#Attempt to fit polynomial functions to along-track and cross-track signals
#See demtools for existing code
def fit_at_ct():
#Derive from image corners in projected array
#Use known orbintal inclinations, project wgs geometry into srs
img1_inc
img2_inc
#Rotate
#Stats for rows, cols
#Fit
#Function copied from openPIV pyprocess
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
"""
ind = corr.argmax()
s = corr.shape[1]
i = ind // s
j = ind % s
return i, j, corr.max()
#Function copied from openPIV pyprocess
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
"""
Find subpixel approximation of the correlation peak.
    This function returns a sub-pixel approximation of the correlation
peak by using one of the several methods available. If requested,
the function also returns the signal to noise ratio level evaluated
from the correlation map.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
Original code from openPIV pyprocess
"""
# initialization
default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)
# the peak locations
peak1_i, peak1_j, dummy = find_first_peak(corr)
try:
# the peak and its neighbours: left, right, down, up
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i-1, peak1_j]
cr = corr[peak1_i+1, peak1_j]
cd = corr[peak1_i, peak1_j-1]
cu = corr[peak1_i, peak1_j+1]
# gaussian fit
if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position[0], subp_peak_position[1]
|
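A usage sketch for compute_offset_sad. This is hedged, not a documented example from the repository: the filenames are hypothetical, and both rasters are assumed to already share grid, resolution, and extent (e.g. after warping with pygeotools warplib).

#Hedged sketch: run the SAD search on two gridded DEMs of identical shape/resolution
from pygeotools.lib import iolib
from demcoreg import coreglib

#Hypothetical filenames; both rasters must already be on a common grid
dem1 = iolib.fn_getma('ref_dem.tif')
dem2 = iolib.fn_getma('src_dem.tif')
#pad sets the +/- search window in pixels for rows and columns
m, int_offset, sp_offset = coreglib.compute_offset_sad(dem1, dem2, pad=(9, 9))
print("Integer (row, col) offset:", int_offset)
print("Sub-pixel (row, col) offset:", sp_offset)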
dshean/demcoreg
|
demcoreg/coreglib.py
|
compute_offset_ncc
|
python
|
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
#Apply edge detection filter up front - improves results when input DEMs are same resolution
if prefilter:
print("Applying LoG edge-detection filter to DEMs")
sigma = 1
import scipy.ndimage
        #Note, ndimage alone propagates NaNs and greatly reduces valid data area
#Use the malib.nanfill wrapper to avoid this
dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)
import scipy.signal
#Compute max offset given dem spatial resolution
    #Should implement arbitrary x and y search space
#xsearch = (20, 41)
#ysearch = (-10, 1)
stride = 1
ref = dem1[::stride,::stride]
    kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
#kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]
#Normalize
ref = (ref - ref.mean()) / ref.std()
kernel = (kernel - kernel.mean()) / kernel.std()
#Consider using astropy.convolve here instead of scipy.correlate?
print("Adding random noise to masked regions")
#Generate random noise to fill gaps before correlation in frequency domain
#Normal distribution N(mean, std^2)
#ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
#kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
#This provides noise in proper range, but noise propagates to m, peak is in different locations!
#ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
#kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
#This provides a proper normal distribution with mean=0 and std=1
ref_noise = ref.mask * (np.random.randn(*ref.shape))
kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
#Add the noise
ref = ref.filled(0) + ref_noise
kernel = kernel.filled(0) + kernel_noise
print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
m = scipy.signal.correlate2d(ref, kernel, 'valid')
#This has memory issues, but ndimage filters can handle nan
#m = scipy.ndimage.filters.correlate(ref, kernel)
print("Computing sub-pixel peak")
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax*stride - pad
#int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
print(m.argmax())
print(m.shape)
print(int_argmax)
print(int_offset)
#Find sub-pixel peak
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
    #May need to split this into integer and decimal components, multiply stride*int and add decimal
#sp_offset = int_offset + (sp_argmax - int_argmax)
sp_offset = sp_argmax - pad
#sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
print(sp_argmax)
print(sp_offset)
if plot:
fig, ax = plt.subplots()
ax.set_title('NCC offset, parabolic SPR')
ax.imshow(m)
#plt.scatter(*int_argmax[::-1])
ax.scatter(*sp_argmax[::-1])
else:
fig = None
return m, int_offset, sp_offset, fig
|
Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/coreglib.py#L118-L198
|
[
"def find_subpixel_peak_position(corr, subpixel_method='gaussian'):\n \"\"\"\n Find subpixel approximation of the correlation peak.\n\n This function returns a subpixels approximation of the correlation\n peak by using one of the several methods available. If requested, \n the function also returns the signal to noise ratio level evaluated \n from the correlation map.\n\n Parameters\n ----------\n corr : np.ndarray\n the correlation map.\n\n subpixel_method : string\n one of the following methods to estimate subpixel location of the peak: \n 'centroid' [replaces default if correlation map is negative], \n 'gaussian' [default if correlation map is positive], \n 'parabolic'.\n\n Returns\n -------\n subp_peak_position : two elements tuple\n the fractional row and column indices for the sub-pixel\n approximation of the correlation peak.\n\n Original code from openPIV pyprocess\n\n \"\"\"\n # initialization\n default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)\n\n # the peak locations\n peak1_i, peak1_j, dummy = find_first_peak(corr)\n\n try:\n # the peak and its neighbours: left, right, down, up\n c = corr[peak1_i, peak1_j]\n cl = corr[peak1_i-1, peak1_j]\n cr = corr[peak1_i+1, peak1_j]\n cd = corr[peak1_i, peak1_j-1] \n cu = corr[peak1_i, peak1_j+1]\n\n # gaussian fit\n if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':\n subpixel_method = 'centroid'\n\n try: \n if subpixel_method == 'centroid':\n subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),\n ((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))\n\n elif subpixel_method == 'gaussian':\n subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),\n peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu)))) \n\n elif subpixel_method == 'parabolic':\n subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),\n peak1_j + (cd-cu)/(2*cd-4*c+2*cu)) \n\n except: \n subp_peak_position = default_peak_position\n\n except IndexError:\n subp_peak_position = default_peak_position\n\n return subp_peak_position[0], subp_peak_position[1]\n"
] |
#! /usr/bin/env python
"""
Library of functions that can be used for co-registration of raster data
For many situations, ASP pc_align ICP co-registration is superior to these approaches. See pc_align_wrapper.sh
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from pygeotools.lib import malib, iolib
def apply_xy_shift(ds, dx, dy, createcopy=True):
"""
Apply horizontal shift to GDAL dataset GeoTransform
Returns:
GDAL Dataset copy with updated GeoTransform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
#Update geotransform
gt_orig = ds.GetGeoTransform()
gt_shift = np.copy(gt_orig)
gt_shift[0] += dx
gt_shift[3] += dy
print("Original geotransform:", gt_orig)
print("Updated geotransform:", gt_shift)
#Update ds Geotransform
if createcopy:
ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
else:
#Update in place, assume ds is opened as GA_Update
ds_align = ds
ds_align.SetGeoTransform(gt_shift)
return ds_align
def apply_z_shift(ds, dz, createcopy=True):
if isinstance(dz, np.ndarray):
print("Z shift offset array mean: ", dz.mean())
else:
print("Z shift offset: ", dz)
if createcopy:
ds_shift = iolib.mem_drv.CreateCopy('', ds, 0)
else:
ds_shift = ds
b = ds_shift.GetRasterBand(1)
a = iolib.b_getma(b)
a += dz
b.WriteArray(a.filled())
return ds_shift
#Function for fitting Nuth and Kaab (2011)
def nuth_func(x, a, b, c):
y = a * np.cos(np.deg2rad(b-x)) + c
#Can use Phasor addition, but need to change conversion to offset dx and dy
#https://stackoverflow.com/questions/12397412/i-know-scipy-curve-fit-can-do-better?rq=1
#y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c
return y
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
"""Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method
"""
#This defines the search window size
#Use half-pixel stride?
#Note: stride is not properly implemented
#stride = 1
#ref = dem1[::stride,::stride]
#kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
#Want to pad evenly on both sides, so add +1 here
m = np.zeros((pad[0]*2+1, pad[1]*2+1))
#Find integer pixel offset
i = j = 0
for i in range(m.shape[0]):
print(i)
for j in range(m.shape[1]):
print(j)
ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
diff = ref - kernel
#Remove outliers beyond IQR
diff_iqr = malib.calcperc(diff, (25,75))
diff = np.ma.masked_outside(diff, *diff_iqr)
"""
diff_med = np.ma.median(diff)
diff_mad = malib.mad(diff)
            diff_madr = (diff_med - diff_mad, diff_med + diff_mad)
            diff = np.ma.masked_outside(diff, *diff_madr)
"""
#Masked areas will decrease sum! Normalize by count of valid pixels
m[i,j] = np.ma.abs(diff).sum()/diff.count()
#Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
m = -m
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax - pad
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
sp_offset = sp_argmax - pad
if plot:
plt.figure()
plt.title('Sum of Absolute Differences')
plt.imshow(m)
plt.scatter(*sp_argmax[::-1])
#plt.show()
return m, int_offset, sp_offset
#This is a decent full-image normalized cross-correlation routine with sub-pixel refinement
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
"""Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method
"""
#Apply edge detection filter up front - improves results when input DEMs are same resolution
if prefilter:
print("Applying LoG edge-detection filter to DEMs")
sigma = 1
import scipy.ndimage
        #Note, ndimage alone propagates NaNs and greatly reduces valid data area
#Use the malib.nanfill wrapper to avoid this
dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)
import scipy.signal
#Compute max offset given dem spatial resolution
    #Should implement arbitrary x and y search space
#xsearch = (20, 41)
#ysearch = (-10, 1)
stride = 1
ref = dem1[::stride,::stride]
    kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
#kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]
#Normalize
ref = (ref - ref.mean()) / ref.std()
kernel = (kernel - kernel.mean()) / kernel.std()
#Consider using astropy.convolve here instead of scipy.correlate?
print("Adding random noise to masked regions")
#Generate random noise to fill gaps before correlation in frequency domain
#Normal distribution N(mean, std^2)
#ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
#kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
#This provides noise in proper range, but noise propagates to m, peak is in different locations!
#ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
#kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
#This provides a proper normal distribution with mean=0 and std=1
ref_noise = ref.mask * (np.random.randn(*ref.shape))
kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
#Add the noise
ref = ref.filled(0) + ref_noise
kernel = kernel.filled(0) + kernel_noise
print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
m = scipy.signal.correlate2d(ref, kernel, 'valid')
#This has memory issues, but ndimage filters can handle nan
#m = scipy.ndimage.filters.correlate(ref, kernel)
print("Computing sub-pixel peak")
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax*stride - pad
#int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
print(m.argmax())
print(m.shape)
print(int_argmax)
print(int_offset)
#Find sub-pixel peak
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
    #May need to split this into integer and decimal components, multiply stride*int and add decimal
#sp_offset = int_offset + (sp_argmax - int_argmax)
sp_offset = sp_argmax - pad
#sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
print(sp_argmax)
print(sp_offset)
if plot:
fig, ax = plt.subplots()
ax.set_title('NCC offset, parabolic SPR')
ax.imshow(m)
#plt.scatter(*int_argmax[::-1])
ax.scatter(*sp_argmax[::-1])
else:
fig = None
return m, int_offset, sp_offset, fig
#This is the Nuth and Kaab (2011) method
def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True):
"""Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method
"""
import scipy.optimize as optimization
if dh.count() < min_count:
sys.exit("Not enough dh samples")
if slope.count() < min_count:
sys.exit("Not enough slope/aspect samples")
#mean_dh = dh.mean()
#mean_slope = slope.mean()
#c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope)))
med_dh = malib.fast_median(dh)
med_slope = malib.fast_median(slope)
c_seed = (med_dh/np.tan(np.deg2rad(med_slope)))
x0 = np.array([0.0, 0.0, c_seed])
print("Computing common mask")
common_mask = ~(malib.common_mask([dh, aspect, slope]))
#Prepare x and y data
xdata = aspect[common_mask].data
ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data
print("Initial sample count:")
print(ydata.size)
if remove_outliers:
print("Removing outliers")
#print("Absolute dz filter: %0.2f" % max_dz)
#diff = np.ma.masked_greater(diff, max_dz)
#print(diff.count())
#Outlier dz filter
f = 3
sigma, u = (ydata.std(), ydata.mean())
#sigma, u = malib.mad(ydata, return_med=True)
rmin = u - f*sigma
rmax = u + f*sigma
print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax))
idx = (ydata >= rmin) & (ydata <= rmax)
xdata = xdata[idx]
ydata = ydata[idx]
print(ydata.size)
#Generate synthetic data to test curve_fit
#xdata = np.arange(0,360,0.01)
#ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))
#Limit sample size
#n = 10000
#idx = random.sample(range(xdata.size), n)
#xdata = xdata[idx]
#ydata = ydata[idx]
#Compute robust statistics for 1-degree bins
nbins = 360
bin_range = (0., 360.)
bin_width = 1.0
bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range)
bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range)
#Needed to estimate sigma for weighted lsq
#bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range)
#Started implementing this for more generic binning, needs testing
#bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \
# xlim=bin_range, nbins=(nbins, nbins), stat='count')
"""
#Mask bins in grid directions, can potentially contain biased stats
#Especially true for SGM algorithm
#badbins = [0, 90, 180, 270, 360]
badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
"""
#Remove any bins with only a few points
min_bin_sample_count = 9
idx = (bin_count.filled(0) >= min_bin_sample_count)
bin_count = bin_count[idx].data
bin_med = bin_med[idx].data
#bin_mad = bin_mad[idx].data
bin_centers = bin_centers[idx]
fit = None
fit_fig = None
#Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit
#Need at least 3 valid bins to fit 3 parameters in nuth_func
#min_bin_count = 3
min_bin_count = 90
#Not going to help if we have a step function between two plateaus, but better than nothing
#Calculate bin aspect spread
bin_ptp = np.cos(np.radians(bin_centers)).ptp()
min_bin_ptp = 1.0
#Should iterate here, if not enough bins, increase bin width
if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp:
print("Computing fit")
#Unweighted fit
fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]
#Weight by observed spread in each bin
#sigma = bin_mad
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0]
#Weight by bin count
#sigma = bin_count.max()/bin_count
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0]
print(fit)
if plot:
print("Generating Nuth and Kaab plot")
bin_idx = np.digitize(xdata, bin_edges)
output = []
for i in np.arange(1, len(bin_edges)):
output.append(ydata[bin_idx==i])
#flierprops={'marker':'.'}
lw = 0.25
whiskerprops={'linewidth':lw}
capprops={'linewidth':lw}
boxprops={'facecolor':'k', 'linewidth':0}
medianprops={'marker':'o', 'ms':1, 'color':'r'}
fit_fig, ax = plt.subplots(figsize=(6,6))
#widths = (bin_width/2.0)
widths = 2.5*(bin_count/bin_count.max())
#widths = bin_count/np.percentile(bin_count, 50)
#Stride
s=3
#This is inefficient, but we have list of arrays with different length, need to filter
        #Redundant with earlier filter, should refactor
bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \
patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \
medianprops=medianprops)
bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360]
ax.set_xticks(bin_ticks)
ax.set_xticklabels(bin_ticks)
"""
#Can pull out medians from boxplot
#We are computing multiple times, inefficient
bp_bin_med = []
for medline in bp['medians']:
bp_bin_med.append(medline.get_ydata()[0])
"""
#Plot the fit
f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2])
nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
ax.plot(bin_centers, f_a, 'b', label=nuth_func_str)
ax.set_xlabel('Aspect (deg)')
ax.set_ylabel('dh/tan(slope) (m)')
ax.axhline(color='gray', linewidth=0.5)
ax.set_xlim(*bin_range)
ylim = ax.get_ylim()
abs_ylim = np.max(np.abs(ylim))
#abs_ylim = np.max(np.abs([ydata.min(), ydata.max()]))
#pad = 0.2 * abs_ylim
pad = 0
ylim = (-abs_ylim - pad, abs_ylim + pad)
minylim = (-10,10)
if ylim[0] > minylim[0]:
ylim = minylim
ax.set_ylim(*ylim)
ax.legend(prop={'size':8})
return fit, fit_fig
#Attempt to fit polynomial functions to along-track and cross-track signals
#See demtools for existing code
def fit_at_ct():
#Derive from image corners in projected array
#Use known orbital inclinations, project wgs geometry into srs
#img1_inc, img2_inc would be derived here (stub, not yet implemented)
#Rotate
#Stats for rows, cols
#Fit
raise NotImplementedError("along-track/cross-track fit not yet implemented")
#Function copied from openPIV pyprocess
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
"""
ind = corr.argmax()
s = corr.shape[1]
i = ind // s
j = ind % s
return i, j, corr.max()
#Function copied from openPIV pyprocess
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
"""
Find subpixel approximation of the correlation peak.
This function returns a subpixel approximation of the correlation
peak location using one of several available methods.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
Original code from openPIV pyprocess
"""
# initialization
default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)
# the peak locations
peak1_i, peak1_j, dummy = find_first_peak(corr)
try:
# the peak and its neighbours: left, right, down, up
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i-1, peak1_j]
cr = corr[peak1_i+1, peak1_j]
cd = corr[peak1_i, peak1_j-1]
cu = corr[peak1_i, peak1_j+1]
# gaussian fit
if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position[0], subp_peak_position[1]
|
dshean/demcoreg
|
demcoreg/coreglib.py
|
compute_offset_nuth
|
python
|
def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True):
import scipy.optimize as optimization
if dh.count() < min_count:
sys.exit("Not enough dh samples")
if slope.count() < min_count:
sys.exit("Not enough slope/aspect samples")
#mean_dh = dh.mean()
#mean_slope = slope.mean()
#c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope)))
med_dh = malib.fast_median(dh)
med_slope = malib.fast_median(slope)
c_seed = (med_dh/np.tan(np.deg2rad(med_slope)))
x0 = np.array([0.0, 0.0, c_seed])
print("Computing common mask")
common_mask = ~(malib.common_mask([dh, aspect, slope]))
#Prepare x and y data
xdata = aspect[common_mask].data
ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data
print("Initial sample count:")
print(ydata.size)
if remove_outliers:
print("Removing outliers")
#print("Absolute dz filter: %0.2f" % max_dz)
#diff = np.ma.masked_greater(diff, max_dz)
#print(diff.count())
#Outlier dz filter
f = 3
sigma, u = (ydata.std(), ydata.mean())
#sigma, u = malib.mad(ydata, return_med=True)
rmin = u - f*sigma
rmax = u + f*sigma
print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax))
idx = (ydata >= rmin) & (ydata <= rmax)
xdata = xdata[idx]
ydata = ydata[idx]
print(ydata.size)
#Generate synthetic data to test curve_fit
#xdata = np.arange(0,360,0.01)
#ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))
#Limit sample size
#n = 10000
#idx = random.sample(range(xdata.size), n)
#xdata = xdata[idx]
#ydata = ydata[idx]
#Compute robust statistics for 1-degree bins
nbins = 360
bin_range = (0., 360.)
bin_width = 1.0
bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range)
bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range)
#Needed to estimate sigma for weighted lsq
#bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range)
#Started implementing this for more generic binning, needs testing
#bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \
# xlim=bin_range, nbins=(nbins, nbins), stat='count')
"""
#Mask bins in grid directions, can potentially contain biased stats
#Especially true for SGM algorithm
#badbins = [0, 90, 180, 270, 360]
badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
"""
#Remove any bins with only a few points
min_bin_sample_count = 9
idx = (bin_count.filled(0) >= min_bin_sample_count)
bin_count = bin_count[idx].data
bin_med = bin_med[idx].data
#bin_mad = bin_mad[idx].data
bin_centers = bin_centers[idx]
fit = None
fit_fig = None
#Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit
#Need at least 3 valid bins to fit 3 parameters in nuth_func
#min_bin_count = 3
min_bin_count = 90
#Not going to help if we have a step function between two plateaus, but better than nothing
#Calculate bin aspect spread
bin_ptp = np.cos(np.radians(bin_centers)).ptp()
min_bin_ptp = 1.0
#Should iterate here, if not enough bins, increase bin width
if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp:
print("Computing fit")
#Unweighted fit
fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]
#Weight by observed spread in each bin
#sigma = bin_mad
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0]
#Weight by bin count
#sigma = bin_count.max()/bin_count
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0]
print(fit)
if plot:
print("Generating Nuth and Kaab plot")
bin_idx = np.digitize(xdata, bin_edges)
output = []
for i in np.arange(1, len(bin_edges)):
output.append(ydata[bin_idx==i])
#flierprops={'marker':'.'}
lw = 0.25
whiskerprops={'linewidth':lw}
capprops={'linewidth':lw}
boxprops={'facecolor':'k', 'linewidth':0}
medianprops={'marker':'o', 'ms':1, 'color':'r'}
fit_fig, ax = plt.subplots(figsize=(6,6))
#widths = (bin_width/2.0)
widths = 2.5*(bin_count/bin_count.max())
#widths = bin_count/np.percentile(bin_count, 50)
#Stride
s=3
#This is inefficient, but we have list of arrays with different length, need to filter
#Redundant with earlier filter, should refactor
bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \
patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \
medianprops=medianprops)
bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360]
ax.set_xticks(bin_ticks)
ax.set_xticklabels(bin_ticks)
"""
#Can pull out medians from boxplot
#We are computing multiple times, inefficient
bp_bin_med = []
for medline in bp['medians']:
bp_bin_med.append(medline.get_ydata()[0])
"""
#Plot the fit
f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2])
nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
ax.plot(bin_centers, f_a, 'b', label=nuth_func_str)
ax.set_xlabel('Aspect (deg)')
ax.set_ylabel('dh/tan(slope) (m)')
ax.axhline(color='gray', linewidth=0.5)
ax.set_xlim(*bin_range)
ylim = ax.get_ylim()
abs_ylim = np.max(np.abs(ylim))
#abs_ylim = np.max(np.abs([ydata.min(), ydata.max()]))
#pad = 0.2 * abs_ylim
pad = 0
ylim = (-abs_ylim - pad, abs_ylim + pad)
minylim = (-10,10)
if ylim[0] > minylim[0]:
ylim = minylim
ax.set_ylim(*ylim)
ax.legend(prop={'size':8})
return fit, fit_fig
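#--- Editor sketch (not in original source): calling compute_offset_nuth ---
#Illustrative only. Assumes dem1/dem2 are co-registered np.ma.MaskedArray
#grids and that slope/aspect (degrees) come from the reference DEM, e.g.
#via gdaldem or pygeotools. All names here are hypothetical.
def nuth_offset_example(dem1, dem2, slope, aspect):
    """Hypothetical wrapper showing expected inputs/outputs (sketch)."""
    dh = dem2 - dem1  #elevation difference on the common grid
    fit, fit_fig = compute_offset_nuth(dh, slope, aspect, plot=False)
    if fit is not None:
        a, b, c = fit  #amplitude (m), phase (deg), mean bias term
    return fit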
|
Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/coreglib.py#L201-L373
|
[
"def nuth_func(x, a, b, c):\n y = a * np.cos(np.deg2rad(b-x)) + c\n #Can use Phasor addition, but need to change conversion to offset dx and dy\n #https://stackoverflow.com/questions/12397412/i-know-scipy-curve-fit-can-do-better?rq=1\n #y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c\n return y\n"
] |
#! /usr/bin/env python
"""
Library of functions that can be used for co-registration of raster data
For many situations, ASP pc_align ICP co-registration is superior to these approaches. See pc_align_wrapper.sh
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from pygeotools.lib import malib, iolib
def apply_xy_shift(ds, dx, dy, createcopy=True):
"""
Apply horizontal shift to GDAL dataset GeoTransform
Returns:
GDAL Dataset copy with updated GeoTransform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
#Update geotransform
gt_orig = ds.GetGeoTransform()
gt_shift = np.copy(gt_orig)
gt_shift[0] += dx
gt_shift[3] += dy
print("Original geotransform:", gt_orig)
print("Updated geotransform:", gt_shift)
#Update ds Geotransform
if createcopy:
ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
else:
#Update in place, assume ds is opened as GA_Update
ds_align = ds
ds_align.SetGeoTransform(gt_shift)
return ds_align
def apply_z_shift(ds, dz, createcopy=True):
if isinstance(dz, np.ndarray):
print("Z shift offset array mean: ", dz.mean())
else:
print("Z shift offset: ", dz)
if createcopy:
ds_shift = iolib.mem_drv.CreateCopy('', ds, 0)
else:
ds_shift = ds
b = ds_shift.GetRasterBand(1)
a = iolib.b_getma(b)
a += dz
b.WriteArray(a.filled())
return ds_shift
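#--- Editor sketch (not in original source): combined shift helper ---
#Assumes dx, dy are in the dataset's projected units and dz in meters;
#simply chains the two helpers above.
def apply_xyz_shift(ds, dx, dy, dz, createcopy=True):
    """Apply horizontal then vertical shift to a GDAL dataset (sketch)."""
    ds_out = apply_xy_shift(ds, dx, dy, createcopy=createcopy)
    return apply_z_shift(ds_out, dz, createcopy=False)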
#Function for fitting Nuth and Kaab (2011)
def nuth_func(x, a, b, c):
y = a * np.cos(np.deg2rad(b-x)) + c
#Can use Phasor addition, but need to change conversion to offset dx and dy
#https://stackoverflow.com/questions/12397412/i-know-scipy-curve-fit-can-do-better?rq=1
#y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c
return y
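#--- Editor sketch (not in original source): fit parameters to shift vector ---
#Per Nuth and Kaab (2011), the fitted amplitude a is the horizontal shift
#magnitude and phase b its direction, so the east/north components follow
#directly; this helper is an assumption-laden sketch, not the library API.
def nuth_fit_to_shift(fit):
    """Convert (a, b, c) fit to (dx_east, dy_north, c) in meters (sketch)."""
    a, b, c = fit
    dx = a * np.sin(np.deg2rad(b))  #east component of horizontal shift
    dy = a * np.cos(np.deg2rad(b))  #north component of horizontal shift
    #c is dh_bar/tan(slope_bar); vertical bias is typically handled separately
    return dx, dy, c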
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
"""Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method
"""
#This defines the search window size
#Use half-pixel stride?
#Note: stride is not properly implemented
#stride = 1
#ref = dem1[::stride,::stride]
#kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
#Want to pad evenly on both sides, so add +1 here
m = np.zeros((pad[0]*2+1, pad[1]*2+1))
#Find integer pixel offset
i = j = 0
for i in range(m.shape[0]):
print(i)
for j in range(m.shape[1]):
print(j)
ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
diff = ref - kernel
#Remove outliers beyond IQR
diff_iqr = malib.calcperc(diff, (25,75))
diff = np.ma.masked_outside(diff, *diff_iqr)
"""
diff_med = np.ma.median(diff)
diff_mad = malib.mad(diff)
diff_madr = (diff_med - diff_mad, diff_med + diff_mad)
diff = np.ma.masked_outside(diff, *diff_madr)
"""
#Masked areas will decrease sum! Normalize by count of valid pixels
m[i,j] = np.ma.abs(diff).sum()/diff.count()
#Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
m = -m
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax - pad
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
sp_offset = sp_argmax - pad
if plot:
plt.figure()
plt.title('Sum of Absolute Differences')
plt.imshow(m)
plt.scatter(*sp_argmax[::-1])
#plt.show()
return m, int_offset, sp_offset
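#--- Editor sketch (not in original source): synthetic SAD check ---
#Rolls a random surface by a known integer offset and recovers it with the
#routine above; the sign convention of the result depends on which raster
#is treated as the reference. Names with a leading underscore are hypothetical.
def _demo_sad(shift=(2, -3), pad=(9, 9), n=100):
    rng = np.random.RandomState(0)
    a = rng.randn(n, n).cumsum(axis=0)  #smooth-ish synthetic surface
    b = np.roll(np.roll(a, shift[0], axis=0), shift[1], axis=1)
    m, int_offset, sp_offset = compute_offset_sad(np.ma.masked_invalid(a), np.ma.masked_invalid(b), pad=pad)
    return int_offset, sp_offset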
#This is a decent full-image normalized cross-correlation routine with sub-pixel refinement
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
"""Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method
"""
#Apply edge detection filter up front - improves results when input DEMs are same resolution
if prefilter:
print("Applying LoG edge-detection filter to DEMs")
sigma = 1
import scipy.ndimage
#Note, ndimage alone propagates NaNs and greatly reduces valid data area
#Use the malib.nanfill wrapper to avoid this
dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)
import scipy.signal
#Compute max offset given dem spatial resolution
#Should implement arbitrary x and y search space
#xsearch = (20, 41)
#ysearch = (-10, 1)
stride = 1
ref = dem1[::stride,::stride]
kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
#kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]
#Normalize
ref = (ref - ref.mean()) / ref.std()
kernel = (kernel - kernel.mean()) / kernel.std()
#Consider using astropy.convolve here instead of scipy.correlate?
print("Adding random noise to masked regions")
#Generate random noise to fill gaps before correlation in frequency domain
#Normal distribution N(mean, std^2)
#ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
#kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
#This provides noise in proper range, but noise propagates to m, peak is in different locations!
#ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
#kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
#This provides a proper normal distribution with mean=0 and std=1
ref_noise = ref.mask * (np.random.randn(*ref.shape))
kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
#Add the noise
ref = ref.filled(0) + ref_noise
kernel = kernel.filled(0) + kernel_noise
print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
m = scipy.signal.correlate2d(ref, kernel, 'valid')
#This has memory issues, but ndimage filters can handle nan
#m = scipy.ndimage.filters.correlate(ref, kernel)
print("Computing sub-pixel peak")
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax*stride - pad
#int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
print(m.argmax())
print(m.shape)
print(int_argmax)
print(int_offset)
#Find sub-pixel peak
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
#May need to split this into integer and decimal components, multiply stride*int and add decimal
#sp_offset = int_offset + (sp_argmax - int_argmax)
sp_offset = sp_argmax - pad
#sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
print(sp_argmax)
print(sp_offset)
if plot:
fig, ax = plt.subplots()
ax.set_title('NCC offset, parabolic SPR')
ax.imshow(m)
#plt.scatter(*int_argmax[::-1])
ax.scatter(*sp_argmax[::-1])
else:
fig = None
return m, int_offset, sp_offset, fig
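#--- Editor sketch (not in original source): pixel offset to map units ---
#Converts a (row, col) pixel offset from the correlation routines above to
#a map-coordinate shift using the standard GDAL geotransform; assumes a
#north-up, unrotated grid. The function name is illustrative.
def pixel_offset_to_map(offset_rc, gt):
    dy_px, dx_px = offset_rc
    dx_map = dx_px * gt[1]  #gt[1] is pixel width (x resolution)
    dy_map = dy_px * gt[5]  #gt[5] is pixel height (negative for north-up)
    return dx_map, dy_map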
#This is the Nuth and Kaab (2011) method
def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True):
"""Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method
"""
import scipy.optimize as optimization
if dh.count() < min_count:
sys.exit("Not enough dh samples")
if slope.count() < min_count:
sys.exit("Not enough slope/aspect samples")
#mean_dh = dh.mean()
#mean_slope = slope.mean()
#c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope)))
med_dh = malib.fast_median(dh)
med_slope = malib.fast_median(slope)
c_seed = (med_dh/np.tan(np.deg2rad(med_slope)))
x0 = np.array([0.0, 0.0, c_seed])
print("Computing common mask")
common_mask = ~(malib.common_mask([dh, aspect, slope]))
#Prepare x and y data
xdata = aspect[common_mask].data
ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data
print("Initial sample count:")
print(ydata.size)
if remove_outliers:
print("Removing outliers")
#print("Absolute dz filter: %0.2f" % max_dz)
#diff = np.ma.masked_greater(diff, max_dz)
#print(diff.count())
#Outlier dz filter
f = 3
sigma, u = (ydata.std(), ydata.mean())
#sigma, u = malib.mad(ydata, return_med=True)
rmin = u - f*sigma
rmax = u + f*sigma
print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax))
idx = (ydata >= rmin) & (ydata <= rmax)
xdata = xdata[idx]
ydata = ydata[idx]
print(ydata.size)
#Generate synthetic data to test curve_fit
#xdata = np.arange(0,360,0.01)
#ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))
#Limit sample size
#n = 10000
#idx = random.sample(range(xdata.size), n)
#xdata = xdata[idx]
#ydata = ydata[idx]
#Compute robust statistics for 1-degree bins
nbins = 360
bin_range = (0., 360.)
bin_width = 1.0
bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range)
bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range)
#Needed to estimate sigma for weighted lsq
#bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range)
#Started implementing this for more generic binning, needs testing
#bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \
# xlim=bin_range, nbins=(nbins, nbins), stat='count')
"""
#Mask bins in grid directions, can potentially contain biased stats
#Especially true for SGM algorithm
#badbins = [0, 90, 180, 270, 360]
badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
"""
#Remove any bins with only a few points
min_bin_sample_count = 9
idx = (bin_count.filled(0) >= min_bin_sample_count)
bin_count = bin_count[idx].data
bin_med = bin_med[idx].data
#bin_mad = bin_mad[idx].data
bin_centers = bin_centers[idx]
fit = None
fit_fig = None
#Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit
#Need at least 3 valid bins to fit 3 parameters in nuth_func
#min_bin_count = 3
min_bin_count = 90
#Not going to help if we have a step function between two plateaus, but better than nothing
#Calculate bin aspect spread
bin_ptp = np.cos(np.radians(bin_centers)).ptp()
min_bin_ptp = 1.0
#Should iterate here, if not enough bins, increase bin width
if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp:
print("Computing fit")
#Unweighted fit
fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]
#Weight by observed spread in each bin
#sigma = bin_mad
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0]
#Weight by bin count
#sigma = bin_count.max()/bin_count
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0]
print(fit)
if plot:
print("Generating Nuth and Kaab plot")
bin_idx = np.digitize(xdata, bin_edges)
output = []
for i in np.arange(1, len(bin_edges)):
output.append(ydata[bin_idx==i])
#flierprops={'marker':'.'}
lw = 0.25
whiskerprops={'linewidth':lw}
capprops={'linewidth':lw}
boxprops={'facecolor':'k', 'linewidth':0}
medianprops={'marker':'o', 'ms':1, 'color':'r'}
fit_fig, ax = plt.subplots(figsize=(6,6))
#widths = (bin_width/2.0)
widths = 2.5*(bin_count/bin_count.max())
#widths = bin_count/np.percentile(bin_count, 50)
#Stride
s=3
#This is inefficient, but we have list of arrays with different length, need to filter
#Redundant with earlier filter, should refactor
bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \
patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \
medianprops=medianprops)
bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360]
ax.set_xticks(bin_ticks)
ax.set_xticklabels(bin_ticks)
"""
#Can pull out medians from boxplot
#We are computing multiple times, inefficient
bp_bin_med = []
for medline in bp['medians']:
bp_bin_med.append(medline.get_ydata()[0])
"""
#Plot the fit
f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2])
nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
ax.plot(bin_centers, f_a, 'b', label=nuth_func_str)
ax.set_xlabel('Aspect (deg)')
ax.set_ylabel('dh/tan(slope) (m)')
ax.axhline(color='gray', linewidth=0.5)
ax.set_xlim(*bin_range)
ylim = ax.get_ylim()
abs_ylim = np.max(np.abs(ylim))
#abs_ylim = np.max(np.abs([ydata.min(), ydata.max()]))
#pad = 0.2 * abs_ylim
pad = 0
ylim = (-abs_ylim - pad, abs_ylim + pad)
minylim = (-10,10)
if ylim[0] > minylim[0]:
ylim = minylim
ax.set_ylim(*ylim)
ax.legend(prop={'size':8})
return fit, fit_fig
#Attempt to fit polynomial functions to along-track and cross-track signals
#See demtools for existing code
def fit_at_ct():
#Derive from image corners in projected array
#Use known orbital inclinations, project wgs geometry into srs
#img1_inc, img2_inc would be derived here (stub, not yet implemented)
#Rotate
#Stats for rows, cols
#Fit
raise NotImplementedError("along-track/cross-track fit not yet implemented")
#Function copied from openPIV pyprocess
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
"""
ind = corr.argmax()
s = corr.shape[1]
i = ind // s
j = ind % s
return i, j, corr.max()
#Function copied from openPIV pyprocess
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
"""
Find subpixel approximation of the correlation peak.
This function returns a subpixel approximation of the correlation
peak location using one of several available methods.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
Original code from openPIV pyprocess
"""
# initialization
default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)
# the peak locations
peak1_i, peak1_j, dummy = find_first_peak(corr)
try:
# the peak and its neighbours: left, right, down, up
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i-1, peak1_j]
cr = corr[peak1_i+1, peak1_j]
cd = corr[peak1_i, peak1_j-1]
cu = corr[peak1_i, peak1_j+1]
# gaussian fit
if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position[0], subp_peak_position[1]
|
dshean/demcoreg
|
demcoreg/coreglib.py
|
find_first_peak
|
python
|
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
"""
ind = corr.argmax()
s = corr.shape[1]
i = ind // s
j = ind % s
return i, j, corr.max()
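#--- Editor note (not in original source): equivalent formulation ---
#The floor-division/modulo pair above matches np.unravel_index, which also
#generalizes beyond 2-D correlation maps; sketch with a hypothetical name.
import numpy as np

def find_first_peak_unravel(corr):
    i, j = np.unravel_index(corr.argmax(), corr.shape)
    return i, j, corr.max()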
|
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/coreglib.py#L387-L416
| null |
#! /usr/bin/env python
"""
Library of functions that can be used for co-registration of raster data
For many situations, ASP pc_align ICP co-registration is superior to these approaches. See pc_align_wrapper.sh
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from pygeotools.lib import malib, iolib
def apply_xy_shift(ds, dx, dy, createcopy=True):
"""
Apply horizontal shift to GDAL dataset GeoTransform
Returns:
GDAL Dataset copy with updated GeoTransform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
#Update geotransform
gt_orig = ds.GetGeoTransform()
gt_shift = np.copy(gt_orig)
gt_shift[0] += dx
gt_shift[3] += dy
print("Original geotransform:", gt_orig)
print("Updated geotransform:", gt_shift)
#Update ds Geotransform
if createcopy:
ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
else:
#Update in place, assume ds is opened as GA_Update
ds_align = ds
ds_align.SetGeoTransform(gt_shift)
return ds_align
def apply_z_shift(ds, dz, createcopy=True):
if isinstance(dz, np.ndarray):
print("Z shift offset array mean: ", dz.mean())
else:
print("Z shift offset: ", dz)
if createcopy:
ds_shift = iolib.mem_drv.CreateCopy('', ds, 0)
else:
ds_shift = ds
b = ds_shift.GetRasterBand(1)
a = iolib.b_getma(b)
a += dz
b.WriteArray(a.filled())
return ds_shift
#Function for fitting Nuth and Kaab (2011)
def nuth_func(x, a, b, c):
y = a * np.cos(np.deg2rad(b-x)) + c
#Can use Phasor addition, but need to change conversion to offset dx and dy
#https://stackoverflow.com/questions/12397412/i-know-scipy-curve-fit-can-do-better?rq=1
#y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c
return y
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
"""Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method
"""
#This defines the search window size
#Use half-pixel stride?
#Note: stride is not properly implemented
#stride = 1
#ref = dem1[::stride,::stride]
#kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
#Want to pad evenly on both sides, so add +1 here
m = np.zeros((pad[0]*2+1, pad[1]*2+1))
#Find integer pixel offset
i = j = 0
for i in range(m.shape[0]):
print(i)
for j in range(m.shape[1]):
print(j)
ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
diff = ref - kernel
#Remove outliers beyond IQR
diff_iqr = malib.calcperc(diff, (25,75))
diff = np.ma.masked_outside(diff, *diff_iqr)
"""
diff_med = np.ma.median(diff)
diff_mad = malib.mad(diff)
diff_madr = (diff_med - diff_mad, diff_med + diff_mad)
diff = np.ma.masked_outside(diff, *diff_madr)
"""
#Masked areas will decrease sum! Normalize by count of valid pixels
m[i,j] = np.ma.abs(diff).sum()/diff.count()
#Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
m = -m
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax - pad
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
sp_offset = sp_argmax - pad
if plot:
plt.figure()
plt.title('Sum of Absolute Differences')
plt.imshow(m)
plt.scatter(*sp_argmax[::-1])
#plt.show()
return m, int_offset, sp_offset
#This is a decent full-image normalized cross-correlation routine with sub-pixel refinement
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
"""Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method
"""
#Apply edge detection filter up front - improves results when input DEMs are same resolution
if prefilter:
print("Applying LoG edge-detection filter to DEMs")
sigma = 1
import scipy.ndimage
#Note, ndimage alone propagates NaNs and greatly reduces valid data area
#Use the malib.nanfill wrapper to avoid this
dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)
import scipy.signal
#Compute max offset given dem spatial resolution
#Should implement arbitrary x and y search space
#xsearch = (20, 41)
#ysearch = (-10, 1)
stride = 1
ref = dem1[::stride,::stride]
kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
#kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]
#Normalize
ref = (ref - ref.mean()) / ref.std()
kernel = (kernel - kernel.mean()) / kernel.std()
#Consider using astropy.convolve here instead of scipy.correlate?
print("Adding random noise to masked regions")
#Generate random noise to fill gaps before correlation in frequency domain
#Normal distribution N(mean, std^2)
#ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
#kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
#This provides noise in proper range, but noise propagates to m, peak is in different locations!
#ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
#kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
#This provides a proper normal distribution with mean=0 and std=1
ref_noise = ref.mask * (np.random.randn(*ref.shape))
kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
#Add the noise
ref = ref.filled(0) + ref_noise
kernel = kernel.filled(0) + kernel_noise
print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
m = scipy.signal.correlate2d(ref, kernel, 'valid')
#This has memory issues, but ndimage filters can handle nan
#m = scipy.ndimage.filters.correlate(ref, kernel)
print("Computing sub-pixel peak")
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax*stride - pad
#int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
print(m.argmax())
print(m.shape)
print(int_argmax)
print(int_offset)
#Find sub-pixel peak
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
#May need to split this into integer and decimal components, multiply stride*int and add decimal
#sp_offset = int_offset + (sp_argmax - int_argmax)
sp_offset = sp_argmax - pad
#sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
print(sp_argmax)
print(sp_offset)
if plot:
fig, ax = plt.subplots()
ax.set_title('NCC offset, parabolic SPR')
ax.imshow(m)
#plt.scatter(*int_argmax[::-1])
ax.scatter(*sp_argmax[::-1])
else:
fig = None
return m, int_offset, sp_offset, fig
#This is the Nuth and Kaab (2011) method
def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True):
"""Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method
"""
import scipy.optimize as optimization
if dh.count() < min_count:
sys.exit("Not enough dh samples")
if slope.count() < min_count:
sys.exit("Not enough slope/aspect samples")
#mean_dh = dh.mean()
#mean_slope = slope.mean()
#c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope)))
med_dh = malib.fast_median(dh)
med_slope = malib.fast_median(slope)
c_seed = (med_dh/np.tan(np.deg2rad(med_slope)))
x0 = np.array([0.0, 0.0, c_seed])
print("Computing common mask")
common_mask = ~(malib.common_mask([dh, aspect, slope]))
#Prepare x and y data
xdata = aspect[common_mask].data
ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data
print("Initial sample count:")
print(ydata.size)
if remove_outliers:
print("Removing outliers")
#print("Absolute dz filter: %0.2f" % max_dz)
#diff = np.ma.masked_greater(diff, max_dz)
#print(diff.count())
#Outlier dz filter
f = 3
sigma, u = (ydata.std(), ydata.mean())
#sigma, u = malib.mad(ydata, return_med=True)
rmin = u - f*sigma
rmax = u + f*sigma
print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax))
idx = (ydata >= rmin) & (ydata <= rmax)
xdata = xdata[idx]
ydata = ydata[idx]
print(ydata.size)
#Generate synthetic data to test curve_fit
#xdata = np.arange(0,360,0.01)
#ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))
#Limit sample size
#n = 10000
#idx = random.sample(range(xdata.size), n)
#xdata = xdata[idx]
#ydata = ydata[idx]
#Compute robust statistics for 1-degree bins
nbins = 360
bin_range = (0., 360.)
bin_width = 1.0
bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range)
bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range)
#Needed to estimate sigma for weighted lsq
#bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range)
#Started implementing this for more generic binning, needs testing
#bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \
# xlim=bin_range, nbins=(nbins, nbins), stat='count')
"""
#Mask bins in grid directions, can potentially contain biased stats
#Especially true for SGM algorithm
#badbins = [0, 90, 180, 270, 360]
badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
"""
#Remove any bins with only a few points
min_bin_sample_count = 9
idx = (bin_count.filled(0) >= min_bin_sample_count)
bin_count = bin_count[idx].data
bin_med = bin_med[idx].data
#bin_mad = bin_mad[idx].data
bin_centers = bin_centers[idx]
fit = None
fit_fig = None
#Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit
#Need at least 3 valid bins to fit 3 parameters in nuth_func
#min_bin_count = 3
min_bin_count = 90
#Not going to help if we have a step function between two plateaus, but better than nothing
#Calculate bin aspect spread
bin_ptp = np.cos(np.radians(bin_centers)).ptp()
min_bin_ptp = 1.0
#Should iterate here, if not enough bins, increase bin width
if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp:
print("Computing fit")
#Unweighted fit
fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]
#Weight by observed spread in each bin
#sigma = bin_mad
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0]
#Weight by bin count
#sigma = bin_count.max()/bin_count
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0]
print(fit)
if plot:
print("Generating Nuth and Kaab plot")
bin_idx = np.digitize(xdata, bin_edges)
output = []
for i in np.arange(1, len(bin_edges)):
output.append(ydata[bin_idx==i])
#flierprops={'marker':'.'}
lw = 0.25
whiskerprops={'linewidth':lw}
capprops={'linewidth':lw}
boxprops={'facecolor':'k', 'linewidth':0}
medianprops={'marker':'o', 'ms':1, 'color':'r'}
fit_fig, ax = plt.subplots(figsize=(6,6))
#widths = (bin_width/2.0)
widths = 2.5*(bin_count/bin_count.max())
#widths = bin_count/np.percentile(bin_count, 50)
#Stride
s=3
#This is inefficient, but we have list of arrays with different length, need to filter
#Reduntant with earlier filter, should refactor
bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \
patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \
medianprops=medianprops)
bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360]
ax.set_xticks(bin_ticks)
ax.set_xticklabels(bin_ticks)
"""
#Can pull out medians from boxplot
#We are computing multiple times, inefficient
bp_bin_med = []
for medline in bp['medians']:
bp_bin_med.append(medline.get_ydata()[0])
"""
#Plot the fit
f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2])
nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
ax.plot(bin_centers, f_a, 'b', label=nuth_func_str)
ax.set_xlabel('Aspect (deg)')
ax.set_ylabel('dh/tan(slope) (m)')
ax.axhline(color='gray', linewidth=0.5)
ax.set_xlim(*bin_range)
ylim = ax.get_ylim()
abs_ylim = np.max(np.abs(ylim))
#abs_ylim = np.max(np.abs([ydata.min(), ydata.max()]))
#pad = 0.2 * abs_ylim
pad = 0
ylim = (-abs_ylim - pad, abs_ylim + pad)
minylim = (-10,10)
if ylim[0] > minylim[0]:
ylim = minylim
ax.set_ylim(*ylim)
ax.legend(prop={'size':8})
return fit, fit_fig
#Attempt to fit polynomial functions to along-track and cross-track signals
#See demtools for existing code
def fit_at_ct():
#Derive from image corners in projected array
#Use known orbital inclinations, project wgs geometry into srs
#img1_inc, img2_inc would be derived here (stub, not yet implemented)
#Rotate
#Stats for rows, cols
#Fit
raise NotImplementedError("along-track/cross-track fit not yet implemented")
#Function copied from openPIV pyprocess
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
"""
ind = corr.argmax()
s = corr.shape[1]
i = ind // s
j = ind % s
return i, j, corr.max()
#Function copied from openPIV pyprocess
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
"""
Find subpixel approximation of the correlation peak.
This function returns a subpixel approximation of the correlation
peak location using one of several available methods.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
Original code from openPIV pyprocess
"""
# initialization
default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)
# the peak locations
peak1_i, peak1_j, dummy = find_first_peak(corr)
try:
# the peak and its neighbours: left, right, down, up
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i-1, peak1_j]
cr = corr[peak1_i+1, peak1_j]
cd = corr[peak1_i, peak1_j-1]
cu = corr[peak1_i, peak1_j+1]
# gaussian fit
if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position[0], subp_peak_position[1]
|
dshean/demcoreg
|
demcoreg/coreglib.py
|
find_subpixel_peak_position
|
python
|
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
# initialization
default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)
# the peak locations
peak1_i, peak1_j, dummy = find_first_peak(corr)
try:
# the peak and its neighbours: left, right, down, up
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i-1, peak1_j]
cr = corr[peak1_i+1, peak1_j]
cd = corr[peak1_i, peak1_j-1]
cu = corr[peak1_i, peak1_j+1]
# gaussian fit
if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position[0], subp_peak_position[1]
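#--- Editor sketch (not in original source): 1-D parabolic refinement ---
#Fitting a parabola through (x-1, cl), (x, c), (x+1, cr) and taking its
#vertex reproduces the sub-pixel correction used in the 'parabolic' branch:
#    x_sub = x + (cl - cr) / (2*cl - 4*c + 2*cr)
def parabolic_vertex_1d(cl, c, cr):
    """Fractional vertex offset relative to the center sample (sketch)."""
    return (cl - cr) / (2.0*cl - 4.0*c + 2.0*cr)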
|
Find subpixel approximation of the correlation peak.
This function returns a subpixel approximation of the correlation
peak location using one of several available methods.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
Original code from openPIV pyprocess
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/coreglib.py#L419-L485
|
[
"def find_first_peak(corr):\n \"\"\"\n Find row and column indices of the first correlation peak.\n\n Parameters\n ----------\n corr : np.ndarray\n the correlation map\n\n Returns\n -------\n i : int\n the row index of the correlation peak\n\n j : int\n the column index of the correlation peak \n\n corr_max1 : int\n the value of the correlation peak\n\n Original code from openPIV pyprocess\n\n \"\"\" \n ind = corr.argmax()\n s = corr.shape[1] \n\n i = ind // s \n j = ind % s\n\n return i, j, corr.max()\n"
] |
#! /usr/bin/env python
"""
Library of functions that can be used for co-registration of raster data
For many situations, ASP pc_align ICP co-registration is superior to these approaches. See pc_align_wrapper.sh
"""
import sys
import numpy as np
import matplotlib.pyplot as plt
from pygeotools.lib import malib, iolib
def apply_xy_shift(ds, dx, dy, createcopy=True):
"""
Apply horizontal shift to GDAL dataset GeoTransform
Returns:
GDAL Dataset copy with updated GeoTransform
"""
print("X shift: ", dx)
print("Y shift: ", dy)
#Update geotransform
gt_orig = ds.GetGeoTransform()
gt_shift = np.copy(gt_orig)
gt_shift[0] += dx
gt_shift[3] += dy
print("Original geotransform:", gt_orig)
print("Updated geotransform:", gt_shift)
#Update ds Geotransform
if createcopy:
ds_align = iolib.mem_drv.CreateCopy('', ds, 0)
else:
#Update in place, assume ds is opened as GA_Update
ds_align = ds
ds_align.SetGeoTransform(gt_shift)
return ds_align
def apply_z_shift(ds, dz, createcopy=True):
if isinstance(dz, np.ndarray):
print("Z shift offset array mean: ", dz.mean())
else:
print("Z shift offset: ", dz)
if createcopy:
ds_shift = iolib.mem_drv.CreateCopy('', ds, 0)
else:
ds_shift = ds
b = ds_shift.GetRasterBand(1)
a = iolib.b_getma(b)
a += dz
b.WriteArray(a.filled())
return ds_shift
#Function for fitting Nuth and Kaab (2011)
def nuth_func(x, a, b, c):
y = a * np.cos(np.deg2rad(b-x)) + c
#Can use Phasor addition, but need to change conversion to offset dx and dy
#https://stackoverflow.com/questions/12397412/i-know-scipy-curve-fit-can-do-better?rq=1
#y = a * np.cos(np.deg2rad(x)) + b * np.sin(np.deg2rad(x)) + c
return y
def compute_offset_sad(dem1, dem2, pad=(9,9), plot=False):
"""Compute subpixel horizontal offset between input rasters using sum of absolute differences (SAD) method
"""
#This defines the search window size
#Use half-pixel stride?
#Note: stride is not properly implemented
#stride = 1
#ref = dem1[::stride,::stride]
#kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
kernel = dem2[pad[0]:-pad[0], pad[1]:-pad[1]]
#Want to pad evenly on both sides, so add +1 here
m = np.zeros((pad[0]*2+1, pad[1]*2+1))
#Find integer pixel offset
i = j = 0
for i in range(m.shape[0]):
print(i)
for j in range(m.shape[1]):
print(j)
ref = dem1[i:i+kernel.shape[0], j:j+kernel.shape[1]]
diff = ref - kernel
#Remove outliers beyond IQR
diff_iqr = malib.calcperc(diff, (25,75))
diff = np.ma.masked_outside(diff, *diff_iqr)
"""
diff_med = np.ma.median(diff)
diff_mad = malib.mad(diff)
diff_madr = (diff_med - diff_mad, diff_med + diff_mad)
diff = np.ma.masked_outside(diff, *diff_madr)
"""
#Masked areas will decrease sum! Normalize by count of valid pixels
m[i,j] = np.ma.abs(diff).sum()/diff.count()
#Note, we're dealing with min SAD here, so want to provide -m for sub-pixel refinement
m = -m
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax - pad
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
sp_offset = sp_argmax - pad
if plot:
plt.figure()
plt.title('Sum of Absolute Differences')
plt.imshow(m)
plt.scatter(*sp_argmax[::-1])
#plt.show()
return m, int_offset, sp_offset
#This is a decent full-image normalized cross-correlation routine with sub-pixel refinement
def compute_offset_ncc(dem1, dem2, pad=(9,9), prefilter=False, plot=False):
"""Compute horizontal offset between input rasters using normalized cross-correlation (NCC) method
"""
#Apply edge detection filter up front - improves results when input DEMs are same resolution
if prefilter:
print("Applying LoG edge-detection filter to DEMs")
sigma = 1
import scipy.ndimage
#Note, ndimage alone propagates NaNs and greatly reduces valid data area
#Use the malib.nanfill wrapper to avoid this
dem1 = malib.nanfill(dem1, scipy.ndimage.filters.gaussian_laplace, sigma)
dem2 = malib.nanfill(dem2, scipy.ndimage.filters.gaussian_laplace, sigma)
import scipy.signal
#Compute max offset given dem spatial resolution
#Should implement arbitrary x and y search space
#xsearch = (20, 41)
#ysearch = (-10, 1)
stride = 1
ref = dem1[::stride,::stride]
kernel = dem2[pad[0]:-pad[0]:stride, pad[1]:-pad[1]:stride]
#kernel = dem2[-ysearch[0]:-ysearch[1]:stride, xsearch[0]:-xsearch[1]:stride]
#Normalize
ref = (ref - ref.mean()) / ref.std()
kernel = (kernel - kernel.mean()) / kernel.std()
#Consider using astropy.convolve here instead of scipy.correlate?
print("Adding random noise to masked regions")
#Generate random noise to fill gaps before correlation in frequency domain
#Normal distribution N(mean, std^2)
#ref_noise = ref.mask * ref.std() * np.random.rand(*ref.shape) + ref.mean()
#kernel_noise = kernel.mask * kernel.std() * np.random.rand(*kernel.shape) + kernel.mean()
#This provides noise in proper range, but noise propagates to m, peak is in different locations!
#ref_noise = ref.mask * (ref.min() + ref.ptp() * np.random.rand(*ref.shape))
#kernel_noise = kernel.mask * (kernel.min() + kernel.ptp() * np.random.rand(*kernel.shape))
#This provides a proper normal distribution with mean=0 and std=1
ref_noise = ref.mask * (np.random.randn(*ref.shape))
kernel_noise = kernel.mask * (np.random.randn(*kernel.shape))
#Add the noise
ref = ref.filled(0) + ref_noise
kernel = kernel.filled(0) + kernel_noise
print("Running 2D correlation with search window (x,y): %i, %i" % (pad[1], pad[0]))
m = scipy.signal.correlate2d(ref, kernel, 'valid')
#This has memory issues, but ndimage filters can handle nan
#m = scipy.ndimage.filters.correlate(ref, kernel)
print("Computing sub-pixel peak")
int_argmax = np.array(np.unravel_index(m.argmax(), m.shape))
int_offset = int_argmax*stride - pad
#int_offset = int_argmax*stride + np.array([ysearch[0], xsearch[0]])
print(m.argmax())
print(m.shape)
print(int_argmax)
print(int_offset)
#Find sub-pixel peak
sp_argmax = np.array(find_subpixel_peak_position(m, 'parabolic'))
#May need to split this into integer and decimal components, multiply stride*int and add decimal
#sp_offset = int_offset + (sp_argmax - int_argmax)
sp_offset = sp_argmax - pad
#sp_offset = sp_argmax + np.array([ysearch[0], xsearch[0]])
print(sp_argmax)
print(sp_offset)
if plot:
fig, ax = plt.subplots()
ax.set_title('NCC offset, parabolic SPR')
ax.imshow(m)
#plt.scatter(*int_argmax[::-1])
ax.scatter(*sp_argmax[::-1])
else:
fig = None
return m, int_offset, sp_offset, fig
#This is the Nuth and Kaab (2011) method
def compute_offset_nuth(dh, slope, aspect, min_count=100, remove_outliers=True, plot=True):
"""Compute horizontal offset between input rasters using Nuth and Kaab [2011] (nuth) method
"""
import scipy.optimize as optimization
if dh.count() < min_count:
sys.exit("Not enough dh samples")
if slope.count() < min_count:
sys.exit("Not enough slope/aspect samples")
#mean_dh = dh.mean()
#mean_slope = slope.mean()
#c_seed = (mean_dh/np.tan(np.deg2rad(mean_slope)))
med_dh = malib.fast_median(dh)
med_slope = malib.fast_median(slope)
c_seed = (med_dh/np.tan(np.deg2rad(med_slope)))
x0 = np.array([0.0, 0.0, c_seed])
print("Computing common mask")
common_mask = ~(malib.common_mask([dh, aspect, slope]))
#Prepare x and y data
xdata = aspect[common_mask].data
ydata = (dh[common_mask]/np.tan(np.deg2rad(slope[common_mask]))).data
print("Initial sample count:")
print(ydata.size)
if remove_outliers:
print("Removing outliers")
#print("Absolute dz filter: %0.2f" % max_dz)
#diff = np.ma.masked_greater(diff, max_dz)
#print(diff.count())
#Outlier dz filter
f = 3
sigma, u = (ydata.std(), ydata.mean())
#sigma, u = malib.mad(ydata, return_med=True)
rmin = u - f*sigma
rmax = u + f*sigma
print("3-sigma filter: %0.2f - %0.2f" % (rmin, rmax))
idx = (ydata >= rmin) & (ydata <= rmax)
xdata = xdata[idx]
ydata = ydata[idx]
print(ydata.size)
#Generate synthetic data to test curve_fit
#xdata = np.arange(0,360,0.01)
#ydata = f(xdata, 20.0, 130.0, -3.0) + 20*np.random.normal(size=len(xdata))
#Limit sample size
#n = 10000
#idx = random.sample(range(xdata.size), n)
#xdata = xdata[idx]
#ydata = ydata[idx]
#Compute robust statistics for 1-degree bins
nbins = 360
bin_range = (0., 360.)
bin_width = 1.0
bin_count, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='count', nbins=nbins, bin_range=bin_range)
bin_med, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat='median', nbins=nbins, bin_range=bin_range)
#Needed to estimate sigma for weighted lsq
#bin_mad, bin_edges, bin_centers = malib.bin_stats(xdata, ydata, stat=malib.mad, nbins=nbins, bin_range=bin_range)
#Started implementing this for more generic binning, needs testing
#bin_count, x_bin_edges, y_bin_edges = malib.get_2dhist(xdata, ydata, \
# xlim=bin_range, nbins=(nbins, nbins), stat='count')
"""
#Mask bins in grid directions, can potentially contain biased stats
#Especially true for SGM algorithm
#badbins = [0, 90, 180, 270, 360]
badbins = [0, 45, 90, 135, 180, 225, 270, 315, 360]
bin_stat = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_stat)
bin_edges = np.ma.masked_where(np.around(bin_edges[:-1]) % 45 == 0, bin_edges)
"""
#Remove any bins with only a few points
min_bin_sample_count = 9
idx = (bin_count.filled(0) >= min_bin_sample_count)
bin_count = bin_count[idx].data
bin_med = bin_med[idx].data
#bin_mad = bin_mad[idx].data
bin_centers = bin_centers[idx]
fit = None
fit_fig = None
#Want a good distribution of bins, at least 1/4 to 1/2 of sinusoid, to ensure good fit
#Need at least 3 valid bins to fit 3 parameters in nuth_func
#min_bin_count = 3
min_bin_count = 90
#Not going to help if we have a step function between two plateaus, but better than nothing
#Calculate bin aspect spread
bin_ptp = np.cos(np.radians(bin_centers)).ptp()
min_bin_ptp = 1.0
#Should iterate here, if not enough bins, increase bin width
if len(bin_med) >= min_bin_count and bin_ptp >= min_bin_ptp:
print("Computing fit")
#Unweighted fit
fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0)[0]
#Weight by observed spread in each bin
#sigma = bin_mad
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=True)[0]
#Weight by bin count
#sigma = bin_count.max()/bin_count
#fit = optimization.curve_fit(nuth_func, bin_centers, bin_med, x0, sigma, absolute_sigma=False)[0]
print(fit)
if plot:
print("Generating Nuth and Kaab plot")
bin_idx = np.digitize(xdata, bin_edges)
output = []
for i in np.arange(1, len(bin_edges)):
output.append(ydata[bin_idx==i])
#flierprops={'marker':'.'}
lw = 0.25
whiskerprops={'linewidth':lw}
capprops={'linewidth':lw}
boxprops={'facecolor':'k', 'linewidth':0}
medianprops={'marker':'o', 'ms':1, 'color':'r'}
fit_fig, ax = plt.subplots(figsize=(6,6))
#widths = (bin_width/2.0)
widths = 2.5*(bin_count/bin_count.max())
#widths = bin_count/np.percentile(bin_count, 50)
#Stride
s=3
#This is inefficient, but we have list of arrays with different length, need to filter
#Reduntant with earlier filter, should refactor
bp = ax.boxplot(np.array(output)[idx][::s], positions=bin_centers[::s], widths=widths[::s], showfliers=False, \
patch_artist=True, boxprops=boxprops, whiskerprops=whiskerprops, capprops=capprops, \
medianprops=medianprops)
bin_ticks = [0, 45, 90, 135, 180, 225, 270, 315, 360]
ax.set_xticks(bin_ticks)
ax.set_xticklabels(bin_ticks)
"""
#Can pull out medians from boxplot
#We are computing multiple times, inefficient
bp_bin_med = []
for medline in bp['medians']:
bp_bin_med.append(medline.get_ydata()[0])
"""
#Plot the fit
f_a = nuth_func(bin_centers, fit[0], fit[1], fit[2])
nuth_func_str = r'$y=%0.2f*cos(%0.2f-x)+%0.2f$' % tuple(fit)
ax.plot(bin_centers, f_a, 'b', label=nuth_func_str)
ax.set_xlabel('Aspect (deg)')
ax.set_ylabel('dh/tan(slope) (m)')
ax.axhline(color='gray', linewidth=0.5)
ax.set_xlim(*bin_range)
ylim = ax.get_ylim()
abs_ylim = np.max(np.abs(ylim))
#abs_ylim = np.max(np.abs([ydata.min(), ydata.max()]))
#pad = 0.2 * abs_ylim
pad = 0
ylim = (-abs_ylim - pad, abs_ylim + pad)
minylim = (-10,10)
if ylim[0] > minylim[0]:
ylim = minylim
ax.set_ylim(*ylim)
ax.legend(prop={'size':8})
return fit, fit_fig
#Attempt to fit polynomial functions to along-track and cross-track signals
#See demtools for existing code
def fit_at_ct():
#Derive from image corners in projected array
#Use known orbital inclinations, project wgs geometry into srs
#img1_inc, img2_inc would be derived here (stub, not yet implemented)
#Rotate
#Stats for rows, cols
#Fit
raise NotImplementedError("along-track/cross-track fit not yet implemented")
#Function copied from openPIV pyprocess
def find_first_peak(corr):
"""
Find row and column indices of the first correlation peak.
Parameters
----------
corr : np.ndarray
the correlation map
Returns
-------
i : int
the row index of the correlation peak
j : int
the column index of the correlation peak
corr_max1 : int
the value of the correlation peak
Original code from openPIV pyprocess
"""
ind = corr.argmax()
s = corr.shape[1]
i = ind // s
j = ind % s
return i, j, corr.max()
#Function copied from openPIV pyprocess
def find_subpixel_peak_position(corr, subpixel_method='gaussian'):
"""
Find subpixel approximation of the correlation peak.
This function returns a subpixel approximation of the correlation
peak location using one of several available methods.
Parameters
----------
corr : np.ndarray
the correlation map.
subpixel_method : string
one of the following methods to estimate subpixel location of the peak:
'centroid' [replaces default if correlation map is negative],
'gaussian' [default if correlation map is positive],
'parabolic'.
Returns
-------
subp_peak_position : two elements tuple
the fractional row and column indices for the sub-pixel
approximation of the correlation peak.
Original code from openPIV pyprocess
"""
# initialization
default_peak_position = (corr.shape[0]/2,corr.shape[1]/2)
# the peak locations
peak1_i, peak1_j, dummy = find_first_peak(corr)
try:
# the peak and its neighbours: left, right, down, up
c = corr[peak1_i, peak1_j]
cl = corr[peak1_i-1, peak1_j]
cr = corr[peak1_i+1, peak1_j]
cd = corr[peak1_i, peak1_j-1]
cu = corr[peak1_i, peak1_j+1]
# gaussian fit
if np.any(np.array([c,cl,cr,cd,cu]) < 0) and subpixel_method == 'gaussian':
subpixel_method = 'centroid'
try:
if subpixel_method == 'centroid':
subp_peak_position = (((peak1_i-1)*cl+peak1_i*c+(peak1_i+1)*cr)/(cl+c+cr),
((peak1_j-1)*cd+peak1_j*c+(peak1_j+1)*cu)/(cd+c+cu))
elif subpixel_method == 'gaussian':
subp_peak_position = (peak1_i + ((np.log(cl)-np.log(cr))/(2*np.log(cl) - 4*np.log(c) + 2*np.log(cr))),
peak1_j + ((np.log(cd)-np.log(cu))/( 2*np.log(cd) - 4*np.log(c) + 2*np.log(cu))))
elif subpixel_method == 'parabolic':
subp_peak_position = (peak1_i + (cl-cr)/(2*cl-4*c+2*cr),
peak1_j + (cd-cu)/(2*cd-4*c+2*cu))
except:
subp_peak_position = default_peak_position
except IndexError:
subp_peak_position = default_peak_position
return subp_peak_position[0], subp_peak_position[1]
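#Usage sketch (illustrative, not part of the original module): refine the peak
#of a synthetic Gaussian correlation surface to subpixel precision.
def _demo_subpixel_peak():
    yy, xx = np.mgrid[0:16, 0:16]
    corr = np.exp(-((xx - 7.3)**2 + (yy - 5.6)**2) / 4.0)
    i, j = find_subpixel_peak_position(corr)
    #For a true Gaussian, the 3-point log fit recovers (i, j) near (5.6, 7.3)
    return i, j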
|
dshean/demcoreg
|
demcoreg/pc_align_error_analysis.py
|
main
|
python
|
def main():
#filenames = !ls *align/*reference-DEM.tif
#run ~/src/demtools/error_analysis.py $filenames.s
if len(sys.argv) < 2:
sys.exit('No input files provided')
fn_list = sys.argv[1:]
n_samp = len(fn_list)
error_dict_list = []
for fn in fn_list:
ed = parse_pc_align_log(fn)
if 'Translation vector (North-East-Down, meters)' in ed.keys():
error_dict_list.append(ed)
import matplotlib.dates as mdates
#This is used for interactive display of x-value in plot window
date_str = '%Y-%m-%d %H:%M'
date_fmt = mdates.DateFormatter(date_str)
#ax.fmt_xdata = mdates.DateFormatter(date_fmt)
months = mdates.MonthLocator()
months_int = mdates.MonthLocator(interval=6) # every n months
years = mdates.YearLocator() # every year
yearsFmt = mdates.DateFormatter('%Y')
#ax.xaxis.set_major_formatter(yearsFmt)
#ax.xaxis.set_major_locator(months_int3)
print()
print("n:", len(error_dict_list))
#NOTE: changed default to N-E-D on 9/18/15
#Can have significant differences for local proj vs. polar stereographic proj
#Should regenerate all previous figures
#Local translation on ellipsoid
#This appears to be local stereographic projection on ellipsoid
key = 'Translation vector (North-East-Down, meters)'
val = np.array([e[key] for e in error_dict_list])
#Reformat (n, e, +d) for (x, y, +z) coord sys
val[:,[0,1]] = val[:,[1,0]]
val[:,2] *= -1
ce90 = geolib.CE90(val[:,0], val[:,1])
le90 = geolib.LE90(val[:,2])
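#CE90/LE90: the radius (horizontal, 2D) and half-interval (vertical, 1D)
#expected to contain 90% of the offsets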
print()
print(key)
print("CE90:", ce90)
print("LE90:", le90)
print()
print('Centroid (mean) of offsets (local ned meters): ', np.mean(val, axis=0))
print('Centroid (median) of offsets (local ned meters): ', np.median(val, axis=0))
#Remove vertical bias
remove_vertbias = False
if remove_vertbias:
print "Removing vertical bias: %0.2f" % np.mean(val, axis=0)[2]
val[:,2] -= np.mean(val, axis=0)[2]
remove_outliers = False
#Flag outliers
x_mag = val[:,0]
y_mag = val[:,1]
h_mag = np.sqrt(val[:,0]**2 + val[:,1]**2)
v_mag = val[:,2]
mag = np.sqrt(val[:,0]**2 + val[:,1]**2 + val[:,2]**2)
abs_thresh = 10.0
p = 98.0
p_thresh = np.percentile(h_mag, p)
#print "Outliers with horiz error >= %0.2f (%0.1f%%)" % (p_thresh, p)
print "Outliers:"
#idx = (h_mag >= p_thresh).nonzero()[0]
idx = (h_mag >= ce90).nonzero()[0]
idx = np.unique(np.hstack([idx, ((np.abs(v_mag) >= le90).nonzero()[0])]))
#Print all
#idx = np.arange(h_mag.size)
#idx_sort = np.argsort(mag[idx])
#idx = idx[idx_sort]
print('name, m, h, v, x, y, z')
for i in idx:
    print(error_dict_list[i]['File'], mag[i], h_mag[i], v_mag[i], val[i,0:3])
#Delete from list (in descending index order, so remaining indices stay valid)
if remove_outliers:
    print("Removing from calculation")
    for i in sorted(idx, reverse=True):
        del error_dict_list[i]
if remove_vertbias or remove_outliers:
print()
print("Updated values")
print(key)
print("CE90:", geolib.CE90(val[:,0], val[:,1]))
print("LE90:", geolib.LE90(val[:,2]))
print()
print('Centroid (mean) of offsets (local ned meters): ', np.mean(val, axis=0))
print('Centroid (median) of offsets (local ned meters): ', np.median(val, axis=0))
#Extract dates
date_vec = np.array([e['Date'] for e in error_dict_list])
x = date_vec
make_plot3d(val[:,0], val[:,1], val[:,2], title=key)
#Note: there is a bug in the PDF backend that displays surface lines
#fig_fn = 'icp_translation_vec_proj_meters.png'
fig_fn = 'icp_translation_vec_local_meters.png'
#plt.savefig(fig_fn, dpi=600, bbox_inches='tight')
fig, ax = plt.subplots(1)
key = 'Translation vector (lat,lon,z)'
plt.title('ICP translation vector (lat,lon,z): Z component')
val = np.array([e[key] for e in error_dict_list])
y = val[:,2]
make_plot(x,y,c='b',label=key, abs=False)
fig.autofmt_xdate()
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_major_locator(months_int)
#ax.xaxis.set_major_formatter(date_fmt)
ax.fmt_xdata = date_fmt
ax.set_ylabel('Z offset (m)')
fig, ax = plt.subplots(1)
key = 'Translation vector magnitude (meters)'
plt.title('ICP Translation vector magnitude (meters)')
y = np.array([e[key] for e in error_dict_list])
make_plot(x,y,c='b',label=key, abs=True)
fig.autofmt_xdate()
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_major_locator(months_int)
#ax.xaxis.set_major_formatter(date_fmt)
ax.fmt_xdata = date_fmt
ax.set_ylabel('Offset (m)')
fig, ax = plt.subplots(1)
key = 'Number of errors'
plt.title('Number of error samples')
nerr = np.array([e[key] for e in error_dict_list])
make_plot(x,nerr,c='b',label=key)
fig.autofmt_xdate()
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_major_locator(months_int)
#ax.xaxis.set_major_formatter(date_fmt)
ax.fmt_xdata = date_fmt
ax.set_ylabel('N samples')
"""
fig, ax = plt.subplots(1)
plt.title('ICP Standard Deviation')
key = 'Input Std Error'
in_std = np.array([e[key] for e in error_dict_list])
make_plot(x,in_std,c='r',label=key)
key = 'Output Std Error'
out_std = np.array([e[key] for e in error_dict_list])
make_plot(x,out_std,c='b',label=key)
fig.autofmt_xdate()
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_major_locator(months_int)
#ax.xaxis.set_major_formatter(date_fmt)
ax.fmt_xdata = date_fmt
plt.legend(scatterpoints=1)
"""
fig, ax = plt.subplots(1)
plt.title('ICP Mean Error')
key = 'Input Mean Error'
in_mean = np.array([e[key] for e in error_dict_list])
#make_plot(x,in_mean,c='r',label=key,yerr=in_std)
make_plot(x,in_mean,c='r',label=key, abs=True)
key = 'Output Mean Error'
out_mean = np.array([e[key] for e in error_dict_list])
#make_plot(x,out_mean,c='b',label=key,yerr=out_std)
make_plot(x,out_mean,c='b',label=key, abs=True)
fig.autofmt_xdate()
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_major_locator(months_int)
#ax.xaxis.set_major_formatter(date_fmt)
ax.fmt_xdata = date_fmt
ax.set_ylabel('Mean error (m)')
plt.legend(scatterpoints=1, loc='upper left', prop={'size':8})
fig, ax = plt.subplots(1)
plt.title('ICP Median Error')
key = 'Input 16th Percentile Error'
in_16p = np.array([e[key] for e in error_dict_list])
key = 'Input 84th Percentile Error'
in_84p = np.array([e[key] for e in error_dict_list])
key = 'Input Median Error'
in_med = np.array([e[key] for e in error_dict_list])
make_plot(x,in_med,c='r',label=key,yerr=[in_med - in_16p, in_84p - in_med], abs=True)
key = 'Output 16th Percentile Error'
out_16p = np.array([e[key] for e in error_dict_list])
key = 'Output 84th Percentile Error'
out_84p = np.array([e[key] for e in error_dict_list])
key = 'Output Median Error'
out_med = np.array([e[key] for e in error_dict_list])
make_plot(x,out_med,c='b',label=key,yerr=[out_med - out_16p, out_84p - out_med], abs=True)
fig.autofmt_xdate()
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_major_locator(months_int)
#ax.xaxis.set_major_formatter(date_fmt)
ax.fmt_xdata = date_fmt
ax.set_ylabel('Median error (m)')
plt.legend(scatterpoints=1, loc='upper left', prop={'size':8})
fig_fn = 'icp_median_error.pdf'
plt.savefig(fig_fn, dpi=600, bbox_inches='tight')
fig, ax = plt.subplots(1)
plt.title('Sampled Median Error')
key = 'Input Sampled 16th Percentile Error'
in_16p = np.ma.fix_invalid([e[key] for e in error_dict_list])
if in_16p.count() > 0:
key = 'Input Sampled 84th Percentile Error'
in_84p = np.ma.fix_invalid([e[key] for e in error_dict_list])
key = 'Input Sampled Median Error'
in_med = np.ma.fix_invalid([e[key] for e in error_dict_list])
in_spread = in_84p - in_16p
make_plot(x,in_med,c='r',label=key,yerr=[in_med - in_16p, in_84p - in_med], abs=True)
key = 'Output Sampled 16th Percentile Error'
out_16p = np.ma.fix_invalid([e[key] for e in error_dict_list])
key = 'Output Sampled 84th Percentile Error'
out_84p = np.ma.fix_invalid([e[key] for e in error_dict_list])
key = 'Output Sampled Median Error'
out_med = np.ma.fix_invalid([e[key] for e in error_dict_list])
out_spread = out_84p - out_16p
p = 90.0
out_med_thresh = np.percentile(out_med, p)
out_spread_thresh = np.percentile(out_spread, p)
#print "Outliers with horiz error >= %0.2f (%0.1f%%)" % (p_thresh, p)
print
print "Sampled Error Outliers:"
#idx = (h_mag >= p_thresh).nonzero()[0]
idx = (out_med >= out_med_thresh).nonzero()[0]
idx = np.unique(np.hstack([idx, ((out_spread >= out_spread_thresh).nonzero()[0])]))
#Print all
idx = np.arange(out_med.size)
idx_sort = np.argsort(out_med[idx])
idx = idx[idx_sort]
print('name, samp_mederr, samp_errspread, nerr')
for i in idx:
    print(error_dict_list[i]['File'], out_med[i], out_spread[i], nerr[i])
#Delete from list (in descending index order, so remaining indices stay valid)
if remove_outliers:
    print("Removing from calculation")
    for i in sorted(idx, reverse=True):
        del error_dict_list[i]
print()
print('Input sampled median error (spread/2): %0.2f (%0.2f)' % (np.median(in_med), np.median(in_spread)/2.))
print('Output sampled median error (spread/2): %0.2f (%0.2f)' % (np.median(out_med), np.median(out_spread)/2.))
print()
make_plot(x,out_med,c='b',label=key,yerr=[out_med - out_16p, out_84p - out_med], abs=True)
fig.autofmt_xdate()
ax.fmt_xdata = date_fmt
ax.xaxis.set_minor_locator(months)
#ax.xaxis.set_major_locator(months_int)
#ax.xaxis.set_major_formatter(date_fmt)
ax.set_ylabel('Median error (m)')
plt.legend(scatterpoints=1, loc='upper left', prop={'size':8})
ax.set_ylim(-15,15)
fig_fn = 'sampled_median_error.pdf'
#fig_fn = 'sampled_median_error_2014-2016.pdf'
#from datetime import datetime
#ax.set_xlim(datetime(2014,1,1),datetime(2016,7,1))
plt.savefig(fig_fn, dpi=600, bbox_inches='tight')
|
#ECEF translations
#key = 'Translation vector (ECEF meters)'
key = 'Translation vector (Cartesian, meters)'
#key = 'Translation vector (meters)'
val = np.array([e[key] for e in error_dict_list])
#make_plot3d(val[:,0], val[:,1], val[:,2], title=key)
ce90 = geolib.CE90(val[:,0], val[:,1])
le90 = geolib.LE90(val[:,2])
print()
print(key)
print("CE90:", ce90)
print("LE90:", le90)
print()
#Proj translation
key = 'Translation vector (Proj meters)'
val = np.array([e[key] for e in error_dict_list])
ce90 = geolib.CE90(val[:,0], val[:,1])
le90 = geolib.LE90(val[:,2])
print()
print(key)
print("CE90:", ce90)
print("LE90:", le90)
print()
print('Centroid (mean) of offsets (Proj meters): ', np.mean(val, axis=0))
print('Centroid (median) of offsets (Proj meters): ', np.median(val, axis=0))
|
train
|
https://github.com/dshean/demcoreg/blob/abd6be75d326b35f52826ee30dff01f9e86b4b52/demcoreg/pc_align_error_analysis.py#L315-L611
|
[
"def make_plot3d(x, y, z, title=None, orthogonal_fig=True):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.set_aspect('equal')\n ax.set_xlabel('X offset (m)')\n ax.set_ylabel('Y offset (m)')\n ax.set_zlabel('Z offset (m)')\n if title is not None:\n plt.suptitle(title)\n ax.plot(x, y, z, 'o')\n\n cmean = np.mean([x,y,z], axis=1)\n cmed = np.median([x,y,z], axis=1)\n\n ax.scatter(cmean[0], cmean[1], cmean[2], color='r', marker='s')\n\n ce90 = geolib.CE90(x,y)\n le90 = geolib.LE90(z)\n coefs = [ce90, ce90, le90]\n ax.set_title(\"CE90: %0.2f, LE90: %0.2f, n=%i\" % (ce90, le90, x.shape[0])) \n\n maxdim = np.ceil(np.max([np.max(np.abs([x, y, z])), ce90, le90]))\n ax.set_xlim(-maxdim, maxdim)\n ax.set_ylim(-maxdim, maxdim)\n ax.set_zlim(-maxdim, maxdim)\n\n rx, ry, rz = coefs\n u = np.linspace(0, 2 * np.pi, 100)\n v = np.linspace(0, np.pi, 100)\n ex = rx * np.outer(np.cos(u), np.sin(v))\n ey = ry * np.outer(np.sin(u), np.sin(v))\n ez = rz * np.outer(np.ones_like(u), np.cos(v))\n ax.plot_surface(ex, ey, ez, rstride=2, cstride=2, linewidth=0, color='b', alpha=0.1)\n #max_radius = max(rx, ry, rz)\n #for axis in 'xyz':\n # getattr(ax, 'set_{}lim'.format(axis))((-max_radius, max_radius))\n if orthogonal_fig:\n from matplotlib.patches import Ellipse\n fig_ortho = plt.figure(figsize=(10,4))\n #fig_ortho = plt.figure()\n title='ICP Alignment Translation Vectors\\nn=%i, mean: (%0.2f, %0.2f, %0.2f)\\nCE90: %0.2f, LE90: %0.2f' % (x.shape[0], cmean[0], cmean[1], cmean[2], ce90, le90)\n plt.suptitle(title) \n\n ax = fig_ortho.add_subplot(131)\n ax.plot(x, y, color='b', linestyle='None', marker='o', label='ICP correction vector')\n ax.plot(cmean[0], cmean[1], color='r', linestyle='None', marker='s', label='Mean')\n #ax.scatter(x, y)\n #ax.scatter(cmean[0], cmean[1], color='r', marker='s')\n ax.set_xlim(-maxdim, maxdim)\n ax.set_ylim(-maxdim, maxdim)\n ax.minorticks_on()\n ax.set_aspect('equal')\n ax.set_xlabel('X offset (m)')\n ax.set_ylabel('Y offset (m)')\n e = Ellipse((0,0), 2*ce90, 2*ce90, linewidth=0, alpha=0.1)\n ax.add_artist(e)\n plt.legend(prop={'size':8}, numpoints=1, loc='upper left')\n\n ax = fig_ortho.add_subplot(132)\n ax.plot(x, z, color='b', linestyle='None', marker='o', label='ICP correction vector')\n ax.plot(cmean[0], cmean[2], color='r', linestyle='None', marker='s', label='Mean')\n #ax.scatter(x, z)\n #ax.scatter(cmean[0], cmean[2], color='r', marker='s')\n ax.set_xlim(-maxdim, maxdim)\n ax.set_ylim(-maxdim, maxdim)\n ax.minorticks_on()\n ax.set_aspect('equal')\n ax.set_xlabel('X offset (m)')\n ax.set_ylabel('Z offset (m)')\n e = Ellipse((0,0), 2*ce90, 2*le90, linewidth=0, alpha=0.1)\n ax.add_artist(e)\n\n ax = fig_ortho.add_subplot(133)\n ax.plot(y, z, color='b', linestyle='None', marker='o', label='ICP correction vector')\n ax.plot(cmean[1], cmean[2], color='r', linestyle='None', marker='s', label='Mean')\n #ax.scatter(y, z)\n #ax.scatter(cmean[1], cmean[2], color='r', marker='s')\n ax.set_xlim(-maxdim, maxdim)\n ax.set_ylim(-maxdim, maxdim)\n ax.minorticks_on()\n ax.set_aspect('equal')\n ax.set_xlabel('Y offset (m)')\n ax.set_ylabel('Z offset (m)')\n e = Ellipse((0,0), 2*ce90, 2*le90, linewidth=0, alpha=0.1)\n ax.add_artist(e)\n\n plt.tight_layout()\n\n #Note: postscript doesn't properly handle tansparency\n #fig_fn = 'icp_translation_vec_proj_meters_orthogonal.pdf'\n fig_fn = 'icp_translation_vec_local_meters_orthogonal.pdf'\n plt.savefig(fig_fn, dpi=600, bbox_inches='tight')\n\n #Set back to original figure\n plt.figure(fig.number)\n",
"def parse_pc_align_log(fn):\n import re\n error_dict = None\n #Determine log filename\n import glob\n log_fn = glob.glob(fn.rsplit('-DEM', 1)[0]+'*.log')\n if not log_fn:\n log_fn = glob.glob(fn.rsplit('-DEM', 1)[0]+'*align/*.log')\n\n if not log_fn:\n print \"Failed to locate align log for %s\" % fn\n else:\n log_fn = log_fn[0]\n print(log_fn)\n f = open(log_fn)\n\n error_dict = {}\n error_dict['File'] = fn\n error_dict['Date'] = timelib.fn_getdatetime(fn)\n\n #This handles cases where no sampling was performed\n error_dict['Input Sampled 16th Percentile Error'] = np.nan \n error_dict['Input Sampled Median Error'] = np.nan \n error_dict['Input Sampled 84th Percentile Error'] = np.nan \n error_dict['Input Sampled Error Spread'] = np.nan \n error_dict['Output Sampled 16th Percentile Error'] = np.nan \n error_dict['Output Sampled Median Error'] = np.nan \n error_dict['Output Sampled 84th Percentile Error'] = np.nan \n error_dict['Output Sampled Error Spread'] = np.nan \n #error_dict['Translation vector (North-East-Down, meters)'] = [np.nan, np.nan, np.nan]\n\n #Set default reference type to point\n error_dict['Ref type'] = 'point'\n\n temp = []\n for line in f:\n key = 'Loaded points'\n if key in line:\n temp.append(int(re.split(':', line.rstrip())[1]))\n key = 'Number of errors'\n if key in line:\n error_dict[key] = int(re.split(':', line.rstrip())[1])\n key = 'Input: error percentile'\n if key in line:\n line_a = re.split(': |, ', line.rstrip())\n error_dict['Input 16th Percentile Error'] = float(line_a[3])\n error_dict['Input Median Error'] = float(line_a[5])\n error_dict['Input 84th Percentile Error'] = float(line_a[7])\n \"\"\"\n key = 'Input: error mean'\n if key in line:\n line_a = re.split(': |, ', line.rstrip())\n error_dict['Input Mean Error'] = float(line_a[2])\n error_dict['Input Std Error'] = float(line_a[4])\n \"\"\"\n #This pulls the line \n #Input: mean of smallest errors: 25%: 7.82061, 50%: 9.71931, 75%: 10.9917, 100%: 12.2715\n #Want the final value\n key = 'Input: mean'\n if key in line:\n line_a = re.split(': |, ', line.rstrip())\n error_dict['Input Mean Error'] = float(line_a[-1])\n key = 'Output: error percentile'\n if key in line:\n line_a = re.split(': |, ', line.rstrip())\n error_dict['Output 16th Percentile Error'] = float(line_a[3])\n error_dict['Output Median Error'] = float(line_a[5])\n error_dict['Output 84th Percentile Error'] = float(line_a[7])\n \"\"\"\n key = 'Output: error mean'\n if key in line:\n line_a = re.split(': |, ', line.rstrip())\n error_dict['Output Mean Error'] = float(line_a[2])\n error_dict['Output Std Error'] = float(line_a[4])\n \"\"\"\n key = 'Output: mean'\n if key in line:\n line_a = re.split(': |, ', line.rstrip())\n error_dict['Output Mean Error'] = float(line_a[-1])\n key = 'Translation vector (Cartesian, meters)'\n #Previous versions of pc_align output this\n #key = 'Translation vector (meters)'\n if key in line:\n error_dict['Translation vector (Cartesian, meters)'] = list(float(i) for i in re.split('Vector3\\(', line.rstrip())[1][:-1].split(',')) \n #error_dict['Translation vector (meters)'] = list(float(i) for i in re.split('Vector3\\(', line.rstrip())[1][:-1].split(',')) \n key = 'Translation vector (North-East-Down, meters)'\n if key in line:\n error_dict['Translation vector (North-East-Down, meters)'] = list(float(i) for i in re.split('Vector3\\(', line.rstrip())[1][:-1].split(',')) \n key = 'Translation vector magnitude (meters)'\n if key in line:\n error_dict[key] = float(re.split(':', line.rstrip())[1])\n key = 'Translation 
vector (lat,lon,z)'\n if key in line:\n error_dict[key] = list(float(i) for i in re.split('Vector3\\(', line.rstrip())[1][:-1].split(',')) \n shift_proj = shift_ll2proj(fn, error_dict[key])\n key = 'Translation vector (Proj meters)'\n error_dict[key] = shift_proj\n\n #This is the output from the point sampling post-alignment\n key = 'Error percentiles'\n if key in line:\n #This is a hack to capture both sampling of input and output\n if 'Output Sampled 16th Percentile Error' in error_dict:\n error_dict['Input Sampled 16th Percentile Error'] = error_dict['Output Sampled 16th Percentile Error']\n error_dict['Input Sampled Median Error'] = error_dict['Output Sampled Median Error']\n error_dict['Input Sampled 84th Percentile Error'] = error_dict['Output Sampled 84th Percentile Error']\n error_dict['Input Sampled Error Spread'] = error_dict['Output Sampled Error Spread']\n line_a = re.split(': |, ', line.rstrip())\n error_dict['Output Sampled 16th Percentile Error'] = float(line_a[2])\n error_dict['Output Sampled Median Error'] = float(line_a[4])\n error_dict['Output Sampled 84th Percentile Error'] = float(line_a[6])\n error_dict['Output Sampled Error Spread'] = float(line_a[6]) - float(line_a[2])\n #key = 'compute_dh'\n #Note: these are not computed for absolute values by compute_dh\n key = 'count:'\n if key in line:\n error_dict['Ref type'] = 'grid'\n #This is a hack to capture both sampling of input and output\n if 'Output Sampled 16th Percentile Error' in error_dict:\n error_dict['Input Sampled 16th Percentile Error'] = error_dict['Output Sampled 16th Percentile Error']\n error_dict['Input Sampled Median Error'] = error_dict['Output Sampled Median Error']\n error_dict['Input Sampled 84th Percentile Error'] = error_dict['Output Sampled 84th Percentile Error']\n error_dict['Input Sampled Error Spread'] = error_dict['Output Sampled Error Spread']\n #Assume the following format for stats:\n #count: 349835 min: -51.39 max: 22.00 mean: 0.29 std: 0.49 med: 0.28 mad: 0.37 \\\n #q1: 0.04 q2: 0.54 iqr: 0.50 mode: 0.29 p16: -0.07 p84: 0.66 spread: 0.37\n line_a = re.split(': | ', line.rstrip())\n error_dict['Output Sampled 16th Percentile Error'] = float(line_a[23])\n error_dict['Output Sampled Median Error'] = float(line_a[11])\n error_dict['Output Sampled 84th Percentile Error'] = float(line_a[25])\n error_dict['Output Sampled Error Spread'] = float(line_a[25]) - float(line_a[23])\n key = 'Mean error'\n if key in line:\n if 'Output Sampled Mean Error' in error_dict:\n error_dict['Input Sampled Mean Error'] = error_dict['Output Sampled Mean Error']\n error_dict['Output Sampled Mean Error'] = float(re.split(':', line.rstrip())[1])\n key = 'RMSE'\n if key in line:\n if 'Output Sampled RMSE' in error_dict:\n error_dict['Input Sampled RMSE'] = error_dict['Output Sampled RMSE']\n error_dict['Output Sampled RMSE'] = float(re.split(':', line.rstrip())[1])\n key = 'Absolute Median Error'\n if key in line:\n if 'Output Absolute Median Error' in error_dict:\n error_dict['Input Absolute Median Error'] = error_dict['Output Absolute Median Error']\n error_dict['Output Absolute Median Error'] = float(re.split(':', line.rstrip())[1])\n\n error_dict['Source points'] = temp[0] \n error_dict['Reference points'] = temp[1] \n\n return error_dict\n",
"def make_plot(x,y,yerr=None,c='k',ms=4,label=None,abs=False):\n y_mean = y.mean()\n y_std = y.std()\n y_med = np.ma.median(y)\n y_nmad = malib.mad(y)\n #plt.plot(x, y, label=label, color=c, marker='o', linestyle='None')\n plt.scatter(x, y, label=label, color=c, marker='o', s=ms)\n if yerr is not None:\n plt.errorbar(x, y, yerr=yerr, color=c, linestyle='None', elinewidth=0.5, capsize=np.sqrt(ms), alpha=0.5)\n plt.axhline(y_med, color=c, linestyle='--', alpha=0.5)\n plt.axhline(y_med + y_nmad, color=c, linestyle=':', alpha=0.5)\n plt.axhline(y_med - y_nmad, color=c, linestyle=':', alpha=0.5)\n plt.axhline(0, color='k', linewidth=0.5, linestyle='-', alpha=0.5)\n ax = plt.gca()\n plt.minorticks_on()\n #ax.tick_params(axis='y',which='minor',left='on')\n if abs:\n ax.set_ylim(bottom=0.0)\n"
] |
#! /usr/bin/env python
#David Shean
#dshean@gmail.com
#Perform error analysis for DEM output from pc_align
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pygeotools.lib import timelib, geolib, malib
#shell
#filenames=$(ls WV*/dem*/*align/*-DEM.tif)
#error_analysis.py $filenames
#ipython
#filenames = !ls *align/*-DEM.tif
#run ~/src/demtools/error_analysis.py $filenames.s
#Compute standard error for all DEMs
#SE=SD/sqrt(n)
def shift_ll2proj(fn, llz):
from osgeo import gdal, osr
from pygeotools.lib import geolib
ds = gdal.Open(fn)
s_srs = geolib.wgs_srs
t_srs = geolib.get_ds_srs(ds)
shift = None
if t_srs is not None and not s_srs.IsSame(t_srs):
#center is lon, lat
#llz is lat, lon
c = geolib.get_center(ds, t_srs=s_srs)
c_shift = [c[0]+llz[1], c[1]+llz[0]]
ct = osr.CoordinateTransformation(s_srs, t_srs)
c_proj = list(ct.TransformPoint(*c)[0:2])
c_shift_proj = list(ct.TransformPoint(*c_shift)[0:2])
shift = list([c_shift_proj[0] - c_proj[0], c_shift_proj[1] - c_proj[1]])
shift.append(llz[2])
return shift
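#Usage sketch (hypothetical filename and values):
#shift_proj = shift_ll2proj('run-align-DEM.tif', [1.2e-5, -3.4e-5, 0.45])
#returns the equivalent [dx, dy, dz] shift in the DEM's projected coordinates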
def parse_pc_align_log(fn):
import re
error_dict = None
#Determine log filename
import glob
log_fn = glob.glob(fn.rsplit('-DEM', 1)[0]+'*.log')
if not log_fn:
log_fn = glob.glob(fn.rsplit('-DEM', 1)[0]+'*align/*.log')
if not log_fn:
print "Failed to locate align log for %s" % fn
else:
log_fn = log_fn[0]
print(log_fn)
f = open(log_fn)
error_dict = {}
error_dict['File'] = fn
error_dict['Date'] = timelib.fn_getdatetime(fn)
#This handles cases where no sampling was performed
error_dict['Input Sampled 16th Percentile Error'] = np.nan
error_dict['Input Sampled Median Error'] = np.nan
error_dict['Input Sampled 84th Percentile Error'] = np.nan
error_dict['Input Sampled Error Spread'] = np.nan
error_dict['Output Sampled 16th Percentile Error'] = np.nan
error_dict['Output Sampled Median Error'] = np.nan
error_dict['Output Sampled 84th Percentile Error'] = np.nan
error_dict['Output Sampled Error Spread'] = np.nan
#error_dict['Translation vector (North-East-Down, meters)'] = [np.nan, np.nan, np.nan]
#Set default reference type to point
error_dict['Ref type'] = 'point'
temp = []
for line in f:
key = 'Loaded points'
if key in line:
temp.append(int(re.split(':', line.rstrip())[1]))
key = 'Number of errors'
if key in line:
error_dict[key] = int(re.split(':', line.rstrip())[1])
key = 'Input: error percentile'
if key in line:
line_a = re.split(': |, ', line.rstrip())
error_dict['Input 16th Percentile Error'] = float(line_a[3])
error_dict['Input Median Error'] = float(line_a[5])
error_dict['Input 84th Percentile Error'] = float(line_a[7])
"""
key = 'Input: error mean'
if key in line:
line_a = re.split(': |, ', line.rstrip())
error_dict['Input Mean Error'] = float(line_a[2])
error_dict['Input Std Error'] = float(line_a[4])
"""
#This pulls the line
#Input: mean of smallest errors: 25%: 7.82061, 50%: 9.71931, 75%: 10.9917, 100%: 12.2715
#Want the final value
key = 'Input: mean'
if key in line:
line_a = re.split(': |, ', line.rstrip())
error_dict['Input Mean Error'] = float(line_a[-1])
key = 'Output: error percentile'
if key in line:
line_a = re.split(': |, ', line.rstrip())
error_dict['Output 16th Percentile Error'] = float(line_a[3])
error_dict['Output Median Error'] = float(line_a[5])
error_dict['Output 84th Percentile Error'] = float(line_a[7])
"""
key = 'Output: error mean'
if key in line:
line_a = re.split(': |, ', line.rstrip())
error_dict['Output Mean Error'] = float(line_a[2])
error_dict['Output Std Error'] = float(line_a[4])
"""
key = 'Output: mean'
if key in line:
line_a = re.split(': |, ', line.rstrip())
error_dict['Output Mean Error'] = float(line_a[-1])
key = 'Translation vector (Cartesian, meters)'
#Previous versions of pc_align output this
#key = 'Translation vector (meters)'
if key in line:
error_dict['Translation vector (Cartesian, meters)'] = list(float(i) for i in re.split('Vector3\(', line.rstrip())[1][:-1].split(','))
#error_dict['Translation vector (meters)'] = list(float(i) for i in re.split('Vector3\(', line.rstrip())[1][:-1].split(','))
key = 'Translation vector (North-East-Down, meters)'
if key in line:
error_dict['Translation vector (North-East-Down, meters)'] = list(float(i) for i in re.split('Vector3\(', line.rstrip())[1][:-1].split(','))
key = 'Translation vector magnitude (meters)'
if key in line:
error_dict[key] = float(re.split(':', line.rstrip())[1])
key = 'Translation vector (lat,lon,z)'
if key in line:
error_dict[key] = list(float(i) for i in re.split('Vector3\(', line.rstrip())[1][:-1].split(','))
shift_proj = shift_ll2proj(fn, error_dict[key])
key = 'Translation vector (Proj meters)'
error_dict[key] = shift_proj
#This is the output from the point sampling post-alignment
key = 'Error percentiles'
if key in line:
#This is a hack to capture both sampling of input and output
if 'Output Sampled 16th Percentile Error' in error_dict:
error_dict['Input Sampled 16th Percentile Error'] = error_dict['Output Sampled 16th Percentile Error']
error_dict['Input Sampled Median Error'] = error_dict['Output Sampled Median Error']
error_dict['Input Sampled 84th Percentile Error'] = error_dict['Output Sampled 84th Percentile Error']
error_dict['Input Sampled Error Spread'] = error_dict['Output Sampled Error Spread']
line_a = re.split(': |, ', line.rstrip())
error_dict['Output Sampled 16th Percentile Error'] = float(line_a[2])
error_dict['Output Sampled Median Error'] = float(line_a[4])
error_dict['Output Sampled 84th Percentile Error'] = float(line_a[6])
error_dict['Output Sampled Error Spread'] = float(line_a[6]) - float(line_a[2])
#key = 'compute_dh'
#Note: these are not computed for absolute values by compute_dh
key = 'count:'
if key in line:
error_dict['Ref type'] = 'grid'
#This is a hack to capture both sampling of input and output
if 'Output Sampled 16th Percentile Error' in error_dict:
error_dict['Input Sampled 16th Percentile Error'] = error_dict['Output Sampled 16th Percentile Error']
error_dict['Input Sampled Median Error'] = error_dict['Output Sampled Median Error']
error_dict['Input Sampled 84th Percentile Error'] = error_dict['Output Sampled 84th Percentile Error']
error_dict['Input Sampled Error Spread'] = error_dict['Output Sampled Error Spread']
#Assume the following format for stats:
#count: 349835 min: -51.39 max: 22.00 mean: 0.29 std: 0.49 med: 0.28 mad: 0.37 \
#q1: 0.04 q2: 0.54 iqr: 0.50 mode: 0.29 p16: -0.07 p84: 0.66 spread: 0.37
line_a = re.split(': | ', line.rstrip())
error_dict['Output Sampled 16th Percentile Error'] = float(line_a[23])
error_dict['Output Sampled Median Error'] = float(line_a[11])
error_dict['Output Sampled 84th Percentile Error'] = float(line_a[25])
error_dict['Output Sampled Error Spread'] = float(line_a[25]) - float(line_a[23])
key = 'Mean error'
if key in line:
if 'Output Sampled Mean Error' in error_dict:
error_dict['Input Sampled Mean Error'] = error_dict['Output Sampled Mean Error']
error_dict['Output Sampled Mean Error'] = float(re.split(':', line.rstrip())[1])
key = 'RMSE'
if key in line:
if 'Output Sampled RMSE' in error_dict:
error_dict['Input Sampled RMSE'] = error_dict['Output Sampled RMSE']
error_dict['Output Sampled RMSE'] = float(re.split(':', line.rstrip())[1])
key = 'Absolute Median Error'
if key in line:
if 'Output Absolute Median Error' in error_dict:
error_dict['Input Absolute Median Error'] = error_dict['Output Absolute Median Error']
error_dict['Output Absolute Median Error'] = float(re.split(':', line.rstrip())[1])
error_dict['Source points'] = temp[0]
error_dict['Reference points'] = temp[1]
return error_dict
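#Usage sketch (hypothetical filename): ed = parse_pc_align_log('run-align-DEM.tif')
#returns a dict of log metrics, e.g. ed['Translation vector (North-East-Down, meters)']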
def make_plot(x,y,yerr=None,c='k',ms=4,label=None,abs=False):
y_mean = y.mean()
y_std = y.std()
y_med = np.ma.median(y)
y_nmad = malib.mad(y)
#plt.plot(x, y, label=label, color=c, marker='o', linestyle='None')
plt.scatter(x, y, label=label, color=c, marker='o', s=ms)
if yerr is not None:
plt.errorbar(x, y, yerr=yerr, color=c, linestyle='None', elinewidth=0.5, capsize=np.sqrt(ms), alpha=0.5)
plt.axhline(y_med, color=c, linestyle='--', alpha=0.5)
plt.axhline(y_med + y_nmad, color=c, linestyle=':', alpha=0.5)
plt.axhline(y_med - y_nmad, color=c, linestyle=':', alpha=0.5)
plt.axhline(0, color='k', linewidth=0.5, linestyle='-', alpha=0.5)
ax = plt.gca()
plt.minorticks_on()
#ax.tick_params(axis='y',which='minor',left='on')
if abs:
ax.set_ylim(bottom=0.0)
#Draw ellipsoid in 3D plot
#http://stackoverflow.com/questions/7819498/plotting-ellipsoid-with-matplotlib
def make_plot3d(x, y, z, title=None, orthogonal_fig=True):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_aspect('equal')
ax.set_xlabel('X offset (m)')
ax.set_ylabel('Y offset (m)')
ax.set_zlabel('Z offset (m)')
if title is not None:
plt.suptitle(title)
ax.plot(x, y, z, 'o')
cmean = np.mean([x,y,z], axis=1)
cmed = np.median([x,y,z], axis=1)
ax.scatter(cmean[0], cmean[1], cmean[2], color='r', marker='s')
ce90 = geolib.CE90(x,y)
le90 = geolib.LE90(z)
coefs = [ce90, ce90, le90]
ax.set_title("CE90: %0.2f, LE90: %0.2f, n=%i" % (ce90, le90, x.shape[0]))
maxdim = np.ceil(np.max([np.max(np.abs([x, y, z])), ce90, le90]))
ax.set_xlim(-maxdim, maxdim)
ax.set_ylim(-maxdim, maxdim)
ax.set_zlim(-maxdim, maxdim)
rx, ry, rz = coefs
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
ex = rx * np.outer(np.cos(u), np.sin(v))
ey = ry * np.outer(np.sin(u), np.sin(v))
ez = rz * np.outer(np.ones_like(u), np.cos(v))
ax.plot_surface(ex, ey, ez, rstride=2, cstride=2, linewidth=0, color='b', alpha=0.1)
#max_radius = max(rx, ry, rz)
#for axis in 'xyz':
# getattr(ax, 'set_{}lim'.format(axis))((-max_radius, max_radius))
if orthogonal_fig:
from matplotlib.patches import Ellipse
fig_ortho = plt.figure(figsize=(10,4))
#fig_ortho = plt.figure()
title='ICP Alignment Translation Vectors\nn=%i, mean: (%0.2f, %0.2f, %0.2f)\nCE90: %0.2f, LE90: %0.2f' % (x.shape[0], cmean[0], cmean[1], cmean[2], ce90, le90)
plt.suptitle(title)
ax = fig_ortho.add_subplot(131)
ax.plot(x, y, color='b', linestyle='None', marker='o', label='ICP correction vector')
ax.plot(cmean[0], cmean[1], color='r', linestyle='None', marker='s', label='Mean')
#ax.scatter(x, y)
#ax.scatter(cmean[0], cmean[1], color='r', marker='s')
ax.set_xlim(-maxdim, maxdim)
ax.set_ylim(-maxdim, maxdim)
ax.minorticks_on()
ax.set_aspect('equal')
ax.set_xlabel('X offset (m)')
ax.set_ylabel('Y offset (m)')
e = Ellipse((0,0), 2*ce90, 2*ce90, linewidth=0, alpha=0.1)
ax.add_artist(e)
plt.legend(prop={'size':8}, numpoints=1, loc='upper left')
ax = fig_ortho.add_subplot(132)
ax.plot(x, z, color='b', linestyle='None', marker='o', label='ICP correction vector')
ax.plot(cmean[0], cmean[2], color='r', linestyle='None', marker='s', label='Mean')
#ax.scatter(x, z)
#ax.scatter(cmean[0], cmean[2], color='r', marker='s')
ax.set_xlim(-maxdim, maxdim)
ax.set_ylim(-maxdim, maxdim)
ax.minorticks_on()
ax.set_aspect('equal')
ax.set_xlabel('X offset (m)')
ax.set_ylabel('Z offset (m)')
e = Ellipse((0,0), 2*ce90, 2*le90, linewidth=0, alpha=0.1)
ax.add_artist(e)
ax = fig_ortho.add_subplot(133)
ax.plot(y, z, color='b', linestyle='None', marker='o', label='ICP correction vector')
ax.plot(cmean[1], cmean[2], color='r', linestyle='None', marker='s', label='Mean')
#ax.scatter(y, z)
#ax.scatter(cmean[1], cmean[2], color='r', marker='s')
ax.set_xlim(-maxdim, maxdim)
ax.set_ylim(-maxdim, maxdim)
ax.minorticks_on()
ax.set_aspect('equal')
ax.set_xlabel('Y offset (m)')
ax.set_ylabel('Z offset (m)')
e = Ellipse((0,0), 2*ce90, 2*le90, linewidth=0, alpha=0.1)
ax.add_artist(e)
plt.tight_layout()
#Note: postscript doesn't properly handle transparency
#fig_fn = 'icp_translation_vec_proj_meters_orthogonal.pdf'
fig_fn = 'icp_translation_vec_local_meters_orthogonal.pdf'
plt.savefig(fig_fn, dpi=600, bbox_inches='tight')
#Set back to original figure
plt.figure(fig.number)
#plt.show()
if __name__ == '__main__':
main()
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
_check_cooling_parameters
|
python
|
def _check_cooling_parameters(radiuscooling, scalecooling):
if radiuscooling != "linear" and radiuscooling != "exponential":
raise Exception("Invalid parameter for radiuscooling: " +
radiuscooling)
if scalecooling != "linear" and scalecooling != "exponential":
raise Exception("Invalid parameter for scalecooling: " +
scalecooling)
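#Usage sketch: _check_cooling_parameters("linear", "exponential") returns quietly,
#while an unsupported value such as "cosine" raises an Exception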
|
Helper function to verify the cooling parameters of the training.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L663-L671
| null |
# -*- coding: utf-8 -*-
"""
The module contains the Somoclu class that trains and visualizes
self-organizing maps and emergent self-organizing maps.
Created on Sun July 26 15:07:47 2015
@author: Peter Wittek
"""
from __future__ import division, print_function
import sys
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
import numpy as np
from scipy.spatial.distance import cdist
try:
import seaborn as sns
from sklearn.metrics.pairwise import pairwise_distances
have_heatmap = True
except ImportError:
have_heatmap = False
try:
from .somoclu_wrap import train as wrap_train
except ImportError:
print("Warning: the binary library cannot be imported. You cannot train "
"maps, but you can load and analyze ones that you have already saved.")
if sys.platform.startswith('win'):
print("If you installed Somoclu with pip on Windows, this typically "
"means missing DLLs. Please refer to the documentation.")
elif sys.platform.startswith('darwin'):
print("If you installed Somoclu with pip on macOS, this typically "
"means missing a linked library. If you compiled Somoclu with "
"GCC, please make sure you have set DYLD_LIBRARY_PATH to include "
"the GCC path. For more information, please refer to the "
"documentation.")
else:
print("The problem occurs because either compilation failed when you "
"installed Somoclu or a path is missing from the dependencies "
"when you are trying to import it. Please refer to the "
"documentation to see your options.")
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
:param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
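#The compiled wrapper expects flat, contiguous buffers; the original shapes
#are restored immediately after the call below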
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
:type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
    #1-D input: promote to a single-row 2-D array
    d1 = data_vector.shape[0]
    w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
    raise Exception("Invalid parameter for kerneltype: " +
                    str(self._kernel_type))
if self._verbose < 0 or self._verbose > 2:
    raise Exception("Invalid parameter for verbose: " +
                    str(self._verbose))
def _pca_init(self):
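#Initialize each codebook vector as the data mean plus the node's normalized
#grid position projected onto the first two principal axes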
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except:
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
self.codebook[0:2] = [1000, 2000]
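#The 1000/2000 sentinel values appear to signal the compiled code to
#randomize the codebook itself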
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
:type filename: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
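#Usage sketch: som.cluster() runs the default K-means (eight clusters) over the
#codebook vectors; som.clusters then holds one cluster label per map node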
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
:returns: The Euclidean distance between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
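#Compute distances in chunks of rows and stack the results, rather than in a
#single cdist call over the full data array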
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
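#Usage sketch: am = som.get_surface_state(); xy = som.get_bmus(am) gives the
#(column, row) grid coordinates of the closest node for each sample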
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
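    # Usage sketch (hedged; the label list and file name are hypothetical):
    #     som.view_similarity_matrix(labels=["s%d" % i for i in range(500)],
    #                                filename="similarity.png")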
def _hexplot(matrix, fig, colormap):
"""Internal function to plot a hexagonal map.
"""
umatrix_min = matrix.min()
umatrix_max = matrix.max()
n_rows, n_columns = matrix.shape
cmap = plt.get_cmap(colormap)
offsets = np.zeros((n_columns * n_rows, 2))
facecolors = []
for row in range(n_rows):
for col in range(n_columns):
if row % 2 == 0:
offsets[row * n_columns + col] = [col +
0.5, 2 * n_rows - 2 * row]
                # Normalize to [0, 1] before querying the colormap
                facecolors.append(cmap((matrix[row, col] - umatrix_min) /
                                       (umatrix_max - umatrix_min)))
else:
offsets[row * n_columns + col] = [col, 2 * n_rows - 2 * row]
                facecolors.append(cmap((matrix[row, col] - umatrix_min) /
                                       (umatrix_max - umatrix_min)))
polygon = np.zeros((6, 2), float)
polygon[:, 0] = 1.1 * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:, 1] = 1.1 * np.array([-np.sqrt(3) / 6, np.sqrt(3) / 6,
np.sqrt(3) / 2 + np.sqrt(3) / 6,
np.sqrt(3) / 6, -np.sqrt(3) / 6,
-np.sqrt(3) / 2 - np.sqrt(3) / 6])
polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
ax = fig.gca()
collection = mcoll.PolyCollection(
polygons,
offsets=offsets,
facecolors=facecolors,
edgecolors=facecolors,
linewidths=1.0,
offset_position="data")
ax.add_collection(collection, autolim=False)
corners = ((-0.5, -0.5), (n_columns + 0.5, 2 * n_rows + 0.5))
ax.update_datalim(corners)
ax.autoscale_view(tight=True)
return offsets
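# Standalone sketch of _hexplot (not part of the original module): render an
# arbitrary random 10x12 matrix on a hexagonal grid. The matrix shape and
# colormap name are illustrative assumptions.
if __name__ == "__main__":
    _demo_fig = plt.figure(figsize=(6, 5))
    _hexplot(np.random.rand(10, 12), _demo_fig, "Spectral_r")
    plt.axis('off')
    plt.show()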
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
_hexplot
|
python
|
def _hexplot(matrix, fig, colormap):
umatrix_min = matrix.min()
umatrix_max = matrix.max()
n_rows, n_columns = matrix.shape
cmap = plt.get_cmap(colormap)
offsets = np.zeros((n_columns * n_rows, 2))
facecolors = []
for row in range(n_rows):
for col in range(n_columns):
if row % 2 == 0:
offsets[row * n_columns + col] = [col +
0.5, 2 * n_rows - 2 * row]
                # Normalize to [0, 1] before querying the colormap
                facecolors.append(cmap((matrix[row, col] - umatrix_min) /
                                       (umatrix_max - umatrix_min)))
else:
offsets[row * n_columns + col] = [col, 2 * n_rows - 2 * row]
                facecolors.append(cmap((matrix[row, col] - umatrix_min) /
                                       (umatrix_max - umatrix_min)))
polygon = np.zeros((6, 2), float)
polygon[:, 0] = 1.1 * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])
polygon[:, 1] = 1.1 * np.array([-np.sqrt(3) / 6, np.sqrt(3) / 6,
np.sqrt(3) / 2 + np.sqrt(3) / 6,
np.sqrt(3) / 6, -np.sqrt(3) / 6,
-np.sqrt(3) / 2 - np.sqrt(3) / 6])
polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)
ax = fig.gca()
collection = mcoll.PolyCollection(
polygons,
offsets=offsets,
facecolors=facecolors,
edgecolors=facecolors,
linewidths=1.0,
offset_position="data")
ax.add_collection(collection, autolim=False)
corners = ((-0.5, -0.5), (n_columns + 0.5, 2 * n_rows + 0.5))
ax.update_datalim(corners)
ax.autoscale_view(tight=True)
return offsets
|
Internal function to plot a hexagonal map.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L674-L713
| null |
# -*- coding: utf-8 -*-
"""
The module contains the Somoclu class that trains and visualizes
self-organizing maps and emergent self-organizing maps.
Created on Sun July 26 15:07:47 2015
@author: Peter Wittek
"""
from __future__ import division, print_function
import sys
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import matplotlib.collections as mcoll
import numpy as np
from scipy.spatial.distance import cdist
try:
import seaborn as sns
from sklearn.metrics.pairwise import pairwise_distances
have_heatmap = True
except ImportError:
have_heatmap = False
try:
from .somoclu_wrap import train as wrap_train
except ImportError:
print("Warning: the binary library cannot be imported. You cannot train "
"maps, but you can load and analyze ones that you have already saved.")
if sys.platform.startswith('win'):
print("If you installed Somoclu with pip on Windows, this typically "
"means missing DLLs. Please refer to the documentation.")
elif sys.platform.startswith('darwin'):
print("If you installed Somoclu with pip on macOS, this typically "
"means missing a linked library. If you compiled Somoclu with "
"GCC, please make sure you have set DYLD_LIBRARY_PATH to include "
"the GCC path. For more information, please refer to the "
"documentation.")
else:
print("The problem occurs because either compilation failed when you "
"installed Somoclu or a path is missing from the dependencies "
"when you are trying to import it. Please refer to the "
"documentation to see your options.")
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
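    # Usage sketch (hedged; data size and cooling choices are illustrative):
    #     som = Somoclu(n_columns=50, n_rows=30)
    #     som.train(np.float32(np.random.rand(1000, 3)), epochs=20,
    #               radiuscooling="exponential", scalecooling="exponential")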
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
        :param zoom: Optional parameter to zoom into a region on the map. The
                     first tuple gives the row limits, the second tuple the
                     column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
        :param zoom: Optional parameter to zoom into a region on the map. The
                     first tuple gives the row limits, the second tuple the
                     column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
        :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
        :param zoom: Optional parameter to zoom into a region on the map. The
                     first tuple gives the row limits, the second tuple the
                     column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
            except ValueError:
                # data_vector is 1-D: its shape has a single element
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):  # fall back for older scikit-learn
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
                self.codebook = np.zeros(codebook_size, dtype=np.float32)
                # Sentinel values that tell the C++ core to randomize the
                # codebook (see the docstring above)
                self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
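    # Usage sketch (hedged): any scikit-learn ClusterMixin with fit_predict
    # can replace the default KMeans, e.g. DBSCAN:
    #     import sklearn.cluster
    #     som.cluster(algorithm=sklearn.cluster.DBSCAN(eps=0.5))
    #     print(som.clusters)  # one integer label per map node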
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the codebook and the data.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
def _check_cooling_parameters(radiuscooling, scalecooling):
"""Helper function to verify the cooling parameters of the training.
"""
if radiuscooling != "linear" and radiuscooling != "exponential":
raise Exception("Invalid parameter for radiuscooling: " +
radiuscooling)
if scalecooling != "linear" and scalecooling != "exponential":
raise Exception("Invalid parameter for scalecooling: " +
scalecooling)
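# End-to-end sketch (hedged): training requires the compiled somoclu binary
# behind wrap_train, and clustering requires scikit-learn; the sizes, grid
# choice, and output file name below are illustrative assumptions.
if __name__ == "__main__":
    _data = np.float32(np.random.rand(500, 4))
    _som = Somoclu(n_columns=40, n_rows=25, gridtype="hexagonal")
    _som.train(_data, epochs=10)
    _som.cluster()
    _som.view_umatrix(bestmatches=True, filename="umatrix.png")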
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.load_bmus
|
python
|
def load_bmus(self, filename):
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
|
Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L136-L154
| null |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
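    # Usage sketch (hedged; map size and radii are illustrative): a toroid
    # map with PCA initialization (which needs scikit-learn):
    #     som = Somoclu(n_columns=60, n_rows=40, maptype="toroid",
    #                   initialization="pca")
    #     som.train(data, epochs=15, radius0=20, radiusN=1)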
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
        :param zoom: Optional parameter to zoom into a region on the map. The
                     first tuple gives the row limits, the second tuple the
                     column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
        :param zoom: Optional parameter to zoom into a region on the map. The
                     first tuple gives the row limits, the second tuple the
                     column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
        :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
        :param zoom: Optional parameter to zoom into a region on the map. The
                     first tuple gives the row limits, the second tuple the
                     column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
            except ValueError:
                # data_vector is 1-D: its shape has a single element
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):  # fall back for older scikit-learn
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
                self.codebook = np.zeros(codebook_size, dtype=np.float32)
                # Sentinel values that tell the C++ core to randomize the
                # codebook (see the docstring above)
                self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
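    # Usage sketch (hedged): a KMeans instance with an explicit cluster count
    # can replace the eight-cluster default:
    #     import sklearn.cluster
    #     som.cluster(algorithm=sklearn.cluster.KMeans(n_clusters=12))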
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the codebook and the data.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
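    # Consistency sketch (hedged; `som` is a hypothetical trained instance):
    # recomputing BMUs from the activation map reproduces the training BMUs:
    #     assert np.array_equal(som.get_bmus(som.get_surface_state()), som.bmus)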
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
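# Restore sketch (hedged): the file names are hypothetical outputs of the
# somoclu command-line tool, and the module-level numpy/matplotlib imports of
# train.py are assumed to be available.
if __name__ == "__main__":
    _som = Somoclu(n_columns=50, n_rows=30)
    _som.load_codebook("som.codebook.txt")
    _som.load_umatrix("som.umatrix.txt")
    _som.view_umatrix(colorbar=True)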
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.load_umatrix
|
python
|
def load_umatrix(self, filename):
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
|
Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L156-L166
| null |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
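    # Illustrative usage (a sketch added in editing, not in the original
    # source): plot the U-matrix with BMUs overlaid, zoomed to the first
    # 10 rows and 20 columns of the map:
    #     som.view_umatrix(bestmatches=True, colorbar=True,
    #                      zoom=((0, 10), (0, 20)))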
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
    :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
                d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
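                # Sentinel values checked by the C++ core as the signal to
                # randomize the codebook (see the docstring above)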
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
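    # Illustrative usage (a sketch added in editing, not in the original
    # source): cluster the codebook with a non-default scikit-learn algorithm;
    # eps=0.5 is an arbitrary example value:
    #     import sklearn.cluster
    #     som.cluster(algorithm=sklearn.cluster.DBSCAN(eps=0.5))
    #     print(som.clusters.shape)   # (n_rows, n_columns) array of labels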
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
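    # Illustrative usage (a sketch added in editing, not in the original
    # source): map unseen vectors onto a trained SOM by chaining the two
    # methods above:
    #     activation = som.get_surface_state(new_data)  # (len(new_data), n_nodes)
    #     bmus = som.get_bmus(activation)               # (len(new_data), 2)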
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.load_codebook
|
python
|
def load_codebook(self, filename):
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
|
Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
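
A minimal usage sketch for `load_codebook` (added for illustration; the map
size and the file name `codebook.wts` are assumptions, and the file is
expected to carry '%'-prefixed header lines such as those written by the
somoclu command-line tool):

    import somoclu

    som = somoclu.Somoclu(n_columns=50, n_rows=40)
    som.load_codebook("codebook.wts")   # hypothetical file name
    # On success the codebook is reshaped to (n_rows, n_columns, n_dim)
    print(som.codebook.shape)           # e.g. (40, 50, 3) for 3-dimensional data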
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L168-L181
| null |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
    :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
                d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
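                # Sentinel values checked by the C++ core as the signal to
                # randomize the codebook (see the docstring above)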
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.train
|
python
|
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
|
Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
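
A hedged usage sketch for `train` (synthetic data; every parameter value
below is illustrative, not a recommendation):

    import numpy as np
    import somoclu

    data = np.float32(np.random.rand(1000, 3))        # 1000 vectors, n_dim = 3
    som = somoclu.Somoclu(n_columns=30, n_rows=20, maptype="toroid")
    # radius0=0 lets the library start from min(n_columns, n_rows)/2
    som.train(data, epochs=10,
              radius0=0, radiusN=1, radiuscooling="exponential",
              scale0=0.1, scaleN=0.01, scalecooling="linear")
    print(som.bmus.shape)       # (1000, 2): (column, row) index of each BMU
    print(som.umatrix.shape)    # (20, 30): n_rows x n_columns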
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L183-L231
|
[
"def _check_cooling_parameters(radiuscooling, scalecooling):\n \"\"\"Helper function to verify the cooling parameters of the training.\n \"\"\"\n if radiuscooling != \"linear\" and radiuscooling != \"exponential\":\n raise Exception(\"Invalid parameter for radiuscooling: \" +\n radiuscooling)\n if scalecooling != \"linear\" and scalecooling != \"exponential\":\n raise Exception(\"Invalid parameter for scalecooling: \" +\n scalecooling)\n",
"def update_data(self, data):\n \"\"\"Change the data set in the Somoclu object. It is useful when the\n data is updated and the training should continue on the new data.\n\n :param data: The training data.\n :type data: 2D numpy.array of float32.\n \"\"\"\n oldn_dim = self.n_dim\n if data.dtype != np.float32:\n print(\"Warning: data was not float32. A 32-bit copy was made\")\n self._data = np.float32(data)\n else:\n self._data = data\n self.n_vectors, self.n_dim = data.shape\n if self.n_dim != oldn_dim and oldn_dim != 0:\n raise Exception(\"The dimension of the new data does not match!\")\n self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)\n",
"def _init_codebook(self):\n \"\"\"Internal function to set the codebook or to indicate it to the C++\n code that it should be randomly initialized.\n \"\"\"\n codebook_size = self._n_columns * self._n_rows * self.n_dim\n if self.codebook is None:\n if self._initialization == \"random\":\n self.codebook = np.zeros(codebook_size, dtype=np.float32)\n self.codebook[0:2] = [1000, 2000]\n else:\n self._pca_init()\n elif self.codebook.size != codebook_size:\n raise Exception(\"Invalid size for initial codebook\")\n else:\n if self.codebook.dtype != np.float32:\n print(\"Warning: initialcodebook was not float32. A 32-bit \"\n \"copy was made\")\n self.codebook = np.float32(self.codebook)\n self.codebook.shape = (codebook_size, )\n"
] |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
    :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
                d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
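                # Sentinel values checked by the C++ core as the signal to
                # randomize the codebook (see the docstring above)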
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
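A minimal end-to-end usage sketch for the Somoclu class shown in the scopes above; the map size, random data, and epoch count are illustrative assumptions, not values from the source:

import numpy as np
import somoclu

# Hypothetical training data: 100 samples, 3 features, float32 as required
data = np.random.rand(100, 3).astype(np.float32)

som = somoclu.Somoclu(n_columns=20, n_rows=15, maptype="planar",
                      gridtype="rectangular", initialization="pca")
som.train(data, epochs=10)             # fills codebook, bmus, and umatrix
som.cluster()                          # K-means (8 clusters) over the codebook
activation = som.get_surface_state()   # per-sample distances to every unit
bmus = som.get_bmus(activation)        # (n_samples, 2) column/row indexes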
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.update_data
|
python
|
def update_data(self, data):
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
|
Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L233-L249
| null |
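A hedged sketch of restoring a previously saved map with the load_* methods of the class below; the file names are hypothetical and assume Somoclu's text output format:

som = somoclu.Somoclu(n_columns=20, n_rows=15)
som.load_codebook("map.wts")   # also sets n_dim from the file
som.load_umatrix("map.umx")
som.load_bmus("map.bm")        # column order is swapped to (x, y) on load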
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
                           * "random": random weights in the codebook
                           * "pca": codebook is initialized from the
                           subspace spanned by the first two eigenvectors of
                           the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
        :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
            try:
                d1, _ = data_vector.shape
                w = data_vector.copy()
            except ValueError:
                # A 1-D vector cannot be unpacked into two dimensions
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):  # fall back for older scikit-learn
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
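A short sketch of continuing training on new data with the update_data method documented above; the shapes are illustrative, and the feature dimension must match the data the map was first trained on:

import numpy as np

# `som` is an already-trained Somoclu instance with n_dim == 3
new_data = np.random.rand(50, 3).astype(np.float32)
som.update_data(new_data)   # resets the BMUs, keeps the trained codebook
som.train(epochs=5)         # training resumes from the existing codebook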
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.view_component_planes
|
python
|
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
|
Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
:type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L251-L293
|
[
"def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,\n bestmatchcolors, labels, zoom, filename):\n \"\"\"Internal function to plot a map with best matching units and labels.\n \"\"\"\n if zoom is None:\n zoom = ((0, self._n_rows), (0, self._n_columns))\n if figsize is None:\n figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))\n fig = plt.figure(figsize=figsize)\n if self._grid_type == \"hexagonal\":\n offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],\n zoom[1][0]:zoom[1][1]], fig, colormap)\n filtered_bmus = self._filter_array(self.bmus, zoom)\n filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]\n filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]\n bmu_coords = np.zeros(filtered_bmus.shape)\n for i, (row, col) in enumerate(filtered_bmus):\n bmu_coords[i] = offsets[col * zoom[1][1] + row]\n else:\n plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],\n aspect='auto', interpolation='bicubic')\n plt.set_cmap(colormap)\n bmu_coords = self._filter_array(self.bmus, zoom)\n bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]\n bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]\n if colorbar:\n cmap = cm.ScalarMappable(cmap=colormap)\n cmap.set_array(matrix)\n plt.colorbar(cmap, orientation='horizontal', shrink=0.5)\n\n if bestmatches:\n if bestmatchcolors is None:\n if self.clusters is None:\n colors = \"white\"\n else:\n colors = []\n for bm in self.bmus:\n colors.append(self.clusters[bm[1], bm[0]])\n colors = self._filter_array(colors, zoom)\n else:\n colors = self._filter_array(bestmatchcolors, zoom)\n plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)\n\n if labels is not None:\n for label, col, row in zip(self._filter_array(labels, zoom),\n bmu_coords[:, 0], bmu_coords[:, 1]):\n if label is not None:\n plt.annotate(label, xy=(col, row), xytext=(10, -5),\n textcoords='offset points', ha='left',\n va='bottom',\n bbox=dict(boxstyle='round,pad=0.3',\n fc='white', alpha=0.8))\n plt.axis('off')\n if filename is not None:\n plt.savefig(filename)\n else:\n plt.show()\n return plt\n"
] |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
                           * "random": random weights in the codebook
                           * "pca": codebook is initialized from the
                           subspace spanned by the first two eigenvectors of
                           the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
        :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
            try:
                d1, _ = data_vector.shape
                w = data_vector.copy()
            except ValueError:
                # A 1-D vector cannot be unpacked into two dimensions
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):  # fall back for older scikit-learn
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
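A brief usage sketch for the view_component_planes method documented in the record above; the dimension indices are illustrative:

# Plot the first two component planes of the trained codebook
som.view_component_planes(dimensions=[0, 1], colorbar=True)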
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.view_umatrix
|
python
|
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
|
Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L295-L327
|
[
"def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,\n bestmatchcolors, labels, zoom, filename):\n \"\"\"Internal function to plot a map with best matching units and labels.\n \"\"\"\n if zoom is None:\n zoom = ((0, self._n_rows), (0, self._n_columns))\n if figsize is None:\n figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))\n fig = plt.figure(figsize=figsize)\n if self._grid_type == \"hexagonal\":\n offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],\n zoom[1][0]:zoom[1][1]], fig, colormap)\n filtered_bmus = self._filter_array(self.bmus, zoom)\n filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]\n filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]\n bmu_coords = np.zeros(filtered_bmus.shape)\n for i, (row, col) in enumerate(filtered_bmus):\n bmu_coords[i] = offsets[col * zoom[1][1] + row]\n else:\n plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],\n aspect='auto', interpolation='bicubic')\n plt.set_cmap(colormap)\n bmu_coords = self._filter_array(self.bmus, zoom)\n bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]\n bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]\n if colorbar:\n cmap = cm.ScalarMappable(cmap=colormap)\n cmap.set_array(matrix)\n plt.colorbar(cmap, orientation='horizontal', shrink=0.5)\n\n if bestmatches:\n if bestmatchcolors is None:\n if self.clusters is None:\n colors = \"white\"\n else:\n colors = []\n for bm in self.bmus:\n colors.append(self.clusters[bm[1], bm[0]])\n colors = self._filter_array(colors, zoom)\n else:\n colors = self._filter_array(bestmatchcolors, zoom)\n plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)\n\n if labels is not None:\n for label, col, row in zip(self._filter_array(labels, zoom),\n bmu_coords[:, 0], bmu_coords[:, 1]):\n if label is not None:\n plt.annotate(label, xy=(col, row), xytext=(10, -5),\n textcoords='offset points', ha='left',\n va='bottom',\n bbox=dict(boxstyle='round,pad=0.3',\n fc='white', alpha=0.8))\n plt.axis('off')\n if filename is not None:\n plt.savefig(filename)\n else:\n plt.show()\n return plt\n"
] |
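A brief usage sketch for the view_umatrix record above; the zoom window is an illustrative assumption (row limits first, then column limits):

# U-matrix with best matching units, restricted to the top-left 10x10 block
som.view_umatrix(bestmatches=True, zoom=((0, 10), (0, 10)))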
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
                           * "random": random weights in the codebook
                           * "pca": codebook is initialized from the
                           subspace spanned by the first two eigenvectors of
                           the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
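    # Usage sketch (illustrative only; `data` stands for an assumed
    # 2D float32 numpy array, not defined here):
    #
    #     som = Somoclu(n_columns=30, n_rows=20, maptype="toroid")
    #     som.train(data, epochs=10, radius0=0, radiusN=1, scale0=0.1)
    #     som.bmus      # shape (n_vectors, 2) after training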
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
        :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
            except ValueError:
                # data_vector is 1D: promote it to a single-row 2D array
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except TypeError:
            # Older scikit-learn releases have no svd_solver argument
            from sklearn.decomposition import RandomizedPCA
            pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
                # Sentinel values interpreted by the C++ core as a request
                # to randomly initialize the codebook
                self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
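    # Clustering sketch (illustrative; any scikit-learn clusterer works):
    #
    #     from sklearn.cluster import DBSCAN
    #     som.cluster(algorithm=DBSCAN())   # or som.cluster() for 8-means
    #     som.clusters                      # (n_rows, n_columns) label grid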
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distance between each data instance and
                  each codebook vector (the activation map).
        :rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
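    # Inference sketch (illustrative; `new_data` is an assumed 2D float32
    # array with the same dimensionality as the training data):
    #
    #     activation = som.get_surface_state(data=new_data)
    #     bmus = som.get_bmus(activation)   # one (column, row) pair per sample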
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
        # Seaborn inverts the y-axis tick labels by default
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
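# End-to-end sketch (illustrative; the random data below is an assumption,
# not part of the library):
#
#     import numpy as np
#     data = np.random.rand(500, 3).astype(np.float32)
#     som = Somoclu(n_columns=30, n_rows=20, gridtype="hexagonal")
#     som.train(data, epochs=10)
#     som.view_umatrix(bestmatches=True, filename="umatrix.png")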
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.view_activation_map
|
python
|
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
            except ValueError:
                # data_vector is 1D: promote it to a single-row 2D array
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
|
Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L329-L397
|
[
"def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,\n bestmatchcolors, labels, zoom, filename):\n \"\"\"Internal function to plot a map with best matching units and labels.\n \"\"\"\n if zoom is None:\n zoom = ((0, self._n_rows), (0, self._n_columns))\n if figsize is None:\n figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))\n fig = plt.figure(figsize=figsize)\n if self._grid_type == \"hexagonal\":\n offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],\n zoom[1][0]:zoom[1][1]], fig, colormap)\n filtered_bmus = self._filter_array(self.bmus, zoom)\n filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]\n filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]\n bmu_coords = np.zeros(filtered_bmus.shape)\n for i, (row, col) in enumerate(filtered_bmus):\n bmu_coords[i] = offsets[col * zoom[1][1] + row]\n else:\n plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],\n aspect='auto', interpolation='bicubic')\n plt.set_cmap(colormap)\n bmu_coords = self._filter_array(self.bmus, zoom)\n bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]\n bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]\n if colorbar:\n cmap = cm.ScalarMappable(cmap=colormap)\n cmap.set_array(matrix)\n plt.colorbar(cmap, orientation='horizontal', shrink=0.5)\n\n if bestmatches:\n if bestmatchcolors is None:\n if self.clusters is None:\n colors = \"white\"\n else:\n colors = []\n for bm in self.bmus:\n colors.append(self.clusters[bm[1], bm[0]])\n colors = self._filter_array(colors, zoom)\n else:\n colors = self._filter_array(bestmatchcolors, zoom)\n plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)\n\n if labels is not None:\n for label, col, row in zip(self._filter_array(labels, zoom),\n bmu_coords[:, 0], bmu_coords[:, 1]):\n if label is not None:\n plt.annotate(label, xy=(col, row), xytext=(10, -5),\n textcoords='offset points', ha='left',\n va='bottom',\n bbox=dict(boxstyle='round,pad=0.3',\n fc='white', alpha=0.8))\n plt.axis('off')\n if filename is not None:\n plt.savefig(filename)\n else:\n plt.show()\n return plt\n",
"def get_surface_state(self, data=None):\n \"\"\"Return the Euclidean distance between codebook and data.\n\n :param data: Optional parameter to specify data, otherwise the\n data used previously to train the SOM is used.\n :type data: 2D numpy.array of float32.\n\n :returns: The the dot product of the codebook and the data.\n :rtype: 2D numpy.array\n \"\"\"\n\n if data is None:\n d = self._data\n else:\n d = data\n\n codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])\n parts = np.array_split(d, 200, axis=0)\n am = np.empty((0, (self._n_columns * self._n_rows)), dtype=\"float64\")\n for part in parts:\n am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)\n\n if data is None:\n self.activation_map = am\n return am\n"
] |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
                           * "random": random weights in the codebook
                           * "pca": codebook is initialized from the
                             subspace spanned by the first two eigenvectors of
                             the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
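    # Incremental-training sketch (illustrative; `new_batch` is an assumed
    # 2D float32 array with the same number of columns as the old data):
    #
    #     som.update_data(new_batch)
    #     som.train(epochs=5)   # continues from the current codebook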
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
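    # Plotting sketch (illustrative): a `filename` saves the figure instead
    # of opening a window, and `zoom` restricts the plotted region:
    #
    #     som.view_umatrix(colorbar=True, bestmatches=True,
    #                      zoom=((0, 10), (0, 10)), filename="umatrix_zoom.png")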
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
        if self._kernel_type != 0 and self._kernel_type != 1:
            raise Exception("Invalid parameter for kerneltype: " +
                            str(self._kernel_type))
        if self._verbose < 0 or self._verbose > 2:
            raise Exception("Invalid parameter for verbose: " +
                            str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except TypeError:
            # Older scikit-learn releases have no svd_solver argument
            from sklearn.decomposition import RandomizedPCA
            pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
                # Sentinel values interpreted by the C++ core as a request
                # to randomly initialize the codebook
                self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distance between each data instance and
                  each codebook vector (the activation map).
        :rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
        # Seaborn inverts the y-axis tick labels by default
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
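# Visualization sketch (illustrative; `som` is an assumed trained instance
# and `sample_labels` an assumed list of strings):
#
#     som.view_component_planes(dimensions=[0, 1])       # one plot per dimension
#     som.view_similarity_matrix(labels=sample_labels)   # needs seaborn + scikit-learn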
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu._view_matrix
|
python
|
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
|
Internal function to plot a map with best matching units and labels.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L399-L456
|
[
"def _hexplot(matrix, fig, colormap):\n \"\"\"Internal function to plot a hexagonal map.\n \"\"\"\n umatrix_min = matrix.min()\n umatrix_max = matrix.max()\n n_rows, n_columns = matrix.shape\n cmap = plt.get_cmap(colormap)\n offsets = np.zeros((n_columns * n_rows, 2))\n facecolors = []\n for row in range(n_rows):\n for col in range(n_columns):\n if row % 2 == 0:\n offsets[row * n_columns + col] = [col +\n 0.5, 2 * n_rows - 2 * row]\n facecolors.append(cmap((matrix[row, col] - umatrix_min) /\n (umatrix_max) * 255))\n else:\n offsets[row * n_columns + col] = [col, 2 * n_rows - 2 * row]\n facecolors.append(cmap((matrix[row, col] - umatrix_min) /\n (umatrix_max) * 255))\n polygon = np.zeros((6, 2), float)\n polygon[:, 0] = 1.1 * np.array([0.5, 0.5, 0.0, -0.5, -0.5, 0.0])\n polygon[:, 1] = 1.1 * np.array([-np.sqrt(3) / 6, np.sqrt(3) / 6,\n np.sqrt(3) / 2 + np.sqrt(3) / 6,\n np.sqrt(3) / 6, -np.sqrt(3) / 6,\n -np.sqrt(3) / 2 - np.sqrt(3) / 6])\n polygons = np.expand_dims(polygon, 0) + np.expand_dims(offsets, 1)\n ax = fig.gca()\n collection = mcoll.PolyCollection(\n polygons,\n offsets=offsets,\n facecolors=facecolors,\n edgecolors=facecolors,\n linewidths=1.0,\n offset_position=\"data\")\n ax.add_collection(collection, autolim=False)\n corners = ((-0.5, -0.5), (n_columns + 0.5, 2 * n_rows + 0.5))\n ax.update_datalim(corners)\n ax.autoscale_view(tight=True)\n return offsets\n",
"def _filter_array(self, a, zoom):\n filtered_array = []\n for index, bmu in enumerate(self.bmus):\n if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \\\n bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:\n filtered_array.append(a[index])\n return np.array(filtered_array)\n"
] |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
    :param initialization: Optional parameter to specify the initialization:
                           * "random": random weights in the codebook
                           * "pca": codebook is initialized from the
                             subspace spanned by the first two eigenvectors of
                             the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
        :param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
            except ValueError:
                # data_vector is 1D: promote it to a single-row 2D array
                d1 = data_vector.shape[0]
                w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernel_type: " +
str(self._kernel_type))
if self._verbose < 0 or self._verbose > 2:
raise Exception("Invalid parameter for verbose: " +
str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except (ImportError, TypeError):
# Older scikit-learn releases reject the svd_solver argument;
# fall back to the deprecated RandomizedPCA class
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
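The coordinate mapping in _pca_init is easiest to see in isolation. Below is a minimal standalone sketch (the 3 x 4 map size is illustrative, not from the library) of how grid indexes are scaled to the [-1, 1] square spanned by the first two principal components:

import numpy as np

# Hypothetical 3 x 4 map, mirroring the loop in _pca_init above
n_rows, n_columns = 3, 4
coord = np.zeros((n_columns * n_rows, 2))
for i in range(n_columns * n_rows):
    coord[i, 0] = i // n_columns  # row index of node i
    coord[i, 1] = i % n_columns   # column index of node i
coord = coord / [n_rows - 1, n_columns - 1]  # scale to [0, 1]
coord = (coord - .5) * 2                     # center on [-1, 1]
print(coord.min(axis=0), coord.max(axis=0))  # [-1. -1.] [1. 1.]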
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
# The first two entries are a magic marker telling the C++ core
# to fill the codebook with random weights
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
:type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
:returns: The Euclidean distance between each data instance and
each codebook vector.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
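Together, get_surface_state and get_bmus map previously unseen samples onto a trained map. A minimal sketch, assuming a trained Somoclu instance som; the sample count is illustrative:

import numpy as np

new_data = np.random.rand(5, som.n_dim).astype(np.float32)
activation_map = som.get_surface_state(data=new_data)  # shape (5, n_rows * n_columns)
bmus = som.get_bmus(activation_map)  # one (column, row) pair per sample
print(bmus.shape)  # (5, 2)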
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
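For context, the methods above compose into a short end-to-end session. A minimal sketch, assuming the package is importable as somoclu and using synthetic data; the map size and filename are illustrative:

import numpy as np
import somoclu

data = np.random.rand(200, 3).astype(np.float32)
som = somoclu.Somoclu(16, 12, maptype="toroid")
som.train(data, epochs=10)
som.view_umatrix(bestmatches=True, filename="umatrix.png")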
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu._check_parameters
|
python
|
def _check_parameters(self):
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernel_type: " +
str(self._kernel_type))
if self._verbose < 0 or self._verbose > 2:
raise Exception("Invalid parameter for verbose: " +
str(self._verbose))
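With the corrected checks above, invalid constructor arguments fail fast. A small illustration assuming those fixes; the map type is deliberately invalid:

import somoclu

try:
    som = somoclu.Somoclu(10, 10, maptype="spherical")  # neither "planar" nor "toroid"
except Exception as e:
    print(e)  # Invalid parameter for _map_type: spherical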
|
Internal function to verify the basic parameters of the SOM.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L466-L483
| null |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
:param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type radiuscooling: str.
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
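The cooling parameters control how the update radius and learning scale decay across epochs. A minimal sketch of a run with exponential cooling (synthetic data; radius0=0 lets the library pick min(n_columns, n_rows)/2):

import numpy as np
import somoclu

data = np.random.rand(500, 4).astype(np.float32)
som = somoclu.Somoclu(20, 20)
som.train(data, epochs=30,
          radius0=0, radiusN=1, radiuscooling="exponential",
          scale0=0.1, scaleN=0.01, scalecooling="exponential")
print(som.codebook.shape)  # (20, 20, 4)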
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
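update_data allows training to continue as new batches arrive, provided the dimensionality is unchanged. A minimal sketch of a hypothetical two-batch workflow:

import numpy as np
import somoclu

som = somoclu.Somoclu(12, 12)
som.train(np.random.rand(100, 3).astype(np.float32), epochs=10)
# A second batch with the same number of feature columns
som.update_data(np.random.rand(50, 3).astype(np.float32))
som.train(epochs=5)  # resumes from the current codebook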
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
:type dimension: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
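view_component_planes iterates over the last axis of the codebook, so individual features can be inspected after training. A minimal sketch, assuming a trained Somoclu instance som on at least three-dimensional data; note that a single filename would be overwritten by each successive plane, so per-plane filenames are usually preferable:

# Plot only the first and third feature planes
som.view_component_planes(dimensions=[0, 2], colorbar=True)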
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
# data_vector is one-dimensional, so reshape it to a row vector
d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except (ImportError, TypeError):
# Older scikit-learn releases reject the svd_solver argument;
# fall back to the deprecated RandomizedPCA class
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
# The first two entries are a magic marker telling the C++ core
# to fill the codebook with random weights
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
:type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
:returns: The Euclidean distance between each data instance and
each codebook vector.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu._init_codebook
|
python
|
def _init_codebook(self):
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
# The first two entries are a magic marker telling the C++ core
# to fill the codebook with random weights
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
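The random branch above never fills in weights itself; the [1000, 2000] marker is resolved inside the C++ core. The choice between the two strategies is made in the constructor. A minimal sketch with synthetic data:

import numpy as np
import somoclu

data = np.random.rand(300, 5).astype(np.float32)
som_random = somoclu.Somoclu(15, 10)                     # random codebook
som_pca = somoclu.Somoclu(15, 10, initialization="pca")  # PCA-based codebook
som_pca.train(data, epochs=10)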
|
Internal function to set the codebook or to signal to the C++
code that it should be randomly initialized.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L511-L529
|
[
"def _pca_init(self):\n try:\n from sklearn.decomposition import PCA\n pca = PCA(n_components=2, svd_solver=\"randomized\")\n except:\n from sklearn.decomposition import RandomizedPCA\n pca = RandomizedPCA(n_components=2)\n coord = np.zeros((self._n_columns * self._n_rows, 2))\n for i in range(self._n_columns * self._n_rows):\n coord[i, 0] = int(i / self._n_columns)\n coord[i, 1] = int(i % self._n_columns)\n coord = coord / [self._n_rows - 1, self._n_columns - 1]\n coord = (coord - .5) * 2\n me = np.mean(self._data, 0)\n self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))\n pca.fit(self._data - me)\n eigvec = pca.components_\n eigval = pca.explained_variance_\n norms = np.linalg.norm(eigvec, axis=1)\n eigvec = ((eigvec.T / norms) * eigval).T\n for j in range(self._n_columns * self._n_rows):\n for i in range(eigvec.shape[0]):\n self.codebook[j, :] = self.codebook[j, :] + \\\n coord[j, i] * eigvec[i, :]\n"
] |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
:param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
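Because load_codebook reads a plain text matrix whose comment lines start with '%', a trained codebook can be round-tripped with numpy. A minimal sketch, assuming a trained map som; the filename is illustrative:

import numpy as np
import somoclu

n_rows, n_columns, n_dim = som.codebook.shape
np.savetxt("codebook.txt",
           som.codebook.reshape(n_rows * n_columns, n_dim),
           header="somoclu codebook", comments="% ")
restored = somoclu.Somoclu(n_columns, n_rows)
restored.load_codebook("codebook.txt")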
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type radiuscooling: str.
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
:type dimension: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
# data_vector is one-dimensional, so reshape it to a row vector
d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernel_type: " +
str(self._kernel_type))
if self._verbose < 0 or self._verbose > 2:
raise Exception("Invalid parameter for verbose: " +
str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except (ImportError, TypeError):
# Older scikit-learn releases reject the svd_solver argument;
# fall back to the deprecated RandomizedPCA class
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
:type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
:returns: The Euclidean distance between each data instance and
each codebook vector.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.cluster
|
python
|
def cluster(self, algorithm=None):
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
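Any scikit-learn clusterer can be supplied instead of the K-means default. A minimal sketch, assuming a trained Somoclu instance som; the cluster count is illustrative:

from sklearn.cluster import AgglomerativeClustering

som.cluster(algorithm=AgglomerativeClustering(n_clusters=4))
print(som.clusters.shape)  # (n_rows, n_columns)
som.view_umatrix(bestmatches=True)  # BMUs colored by cluster membership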
|
Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
:type algorithm: sklearn.base.ClusterMixin.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L531-L556
| null |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
:param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
:type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
# A 1-D vector has a single-element shape tuple
d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernelTye: " +
self._kernel_type)
if self._verbose < 0 or self._verbose > 2:
raise Exception("Invalid parameter for verbose: " +
str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except (ImportError, TypeError):  # older scikit-learn lacks svd_solver
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
:returns: The Euclidean distance between each data vector and each codebook vector.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
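A minimal usage sketch for the `Somoclu.cluster` record above, assuming somoclu, numpy, and scikit-learn are installed. The map dimensions, the random toy data, and the DBSCAN parameters are illustrative assumptions, not values taken from the source.

import numpy as np
import somoclu
from sklearn.cluster import DBSCAN

# Arbitrary toy data; somoclu works on float32 arrays
data = np.random.rand(100, 3).astype(np.float32)
som = somoclu.Somoclu(n_columns=20, n_rows=15)
som.train(data)

# Default clustering of the codebook: K-means with eight clusters
som.cluster()
print(som.clusters.shape)  # (15, 20): one cluster label per map node

# Any scikit-learn ClusterMixin can be substituted, e.g. DBSCAN
som.cluster(algorithm=DBSCAN(eps=0.5, min_samples=4))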
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.get_surface_state
|
python
|
def get_surface_state(self, data=None):
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
|
Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
:returns: The Euclidean distance between each data vector and each codebook vector.
:rtype: 2D numpy.array
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L558-L582
| null |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
:param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
:type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
# A 1-D vector has a single-element shape tuple
d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernelTye: " +
self._kernel_type)
if self._verbose < 0 or self._verbose > 2:
raise Exception("Invalid parameter for verbose: " +
str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except (ImportError, TypeError):  # older scikit-learn lacks svd_solver
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
:type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
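A sketch of how `get_surface_state` and `get_bmus` compose, under the same illustrative assumptions as the example above (random data, arbitrary map size): the activation map stores one Euclidean distance per (sample, node) pair, and an argmin over the node axis recovers the BMU coordinates.

import numpy as np
import somoclu

data = np.random.rand(50, 3).astype(np.float32)
som = somoclu.Somoclu(n_columns=10, n_rows=8)
som.train(data)

# One row per sample, one column per map node (n_rows * n_columns)
am = som.get_surface_state()
print(am.shape)  # (50, 80)

# (column, row) BMU coordinates recovered from the activation map;
# for the training data these should agree with som.bmus
bmus = som.get_bmus(am)
print(np.array_equal(bmus, som.bmus))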
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.get_bmus
|
python
|
def get_bmus(self, activation_map):
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
|
Returns the Best Matching Unit indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L584-L597
| null |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
:param initialization: Optional parameter to specify the initialization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
:type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
# A 1-D vector has a single-element shape tuple
d1 = data_vector.shape[0]
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernelTye: " +
self._kernel_type)
if self._verbose < 0 or self._verbose > 2:
raise Exception("Invalid parameter for verbose: " +
str(self._verbose))
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
except (ImportError, TypeError):  # older scikit-learn lacks svd_solver
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
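                # sentinel values: flag telling the C++ core to randomize this codebook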
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
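    # A hedged usage sketch (not from the original source; the data array and
    # cluster count below are hypothetical):
    #
    #     som = Somoclu(n_columns=30, n_rows=20)
    #     som.train(data)                                    # data: 2D float32 array
    #     som.cluster(sklearn.cluster.KMeans(n_clusters=4))
    #     print(som.clusters.shape)                          # (20, 30), one label per node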
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
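    # A hedged sketch (assumed shapes, not from the original source): the
    # activation map has one row per sample and one column per map node, so
    # BMUs of held-out data can be read off with get_bmus:
    #
    #     am = som.get_surface_state(new_data)  # shape (n_samples, n_rows * n_columns)
    #     bmus = som.get_bmus(am)               # shape (n_samples, 2), (x, y) pairs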
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
"""Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
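A minimal end-to-end sketch of the similarity-matrix view, assuming a working somoclu install with seaborn and scikit-learn available; the array sizes and file name are illustrative, not taken from the original source:
import numpy as np
import somoclu

data = np.float32(np.random.rand(50, 3))        # 50 hypothetical samples
som = somoclu.Somoclu(n_columns=20, n_rows=10)
som.train(data)
# Correlate the activation maps of all samples and render the heatmap
som.view_similarity_matrix(labels=[str(i) for i in range(50)],
                           filename="similarity.png")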
|
peterwittek/somoclu
|
src/Python/somoclu/train.py
|
Somoclu.view_similarity_matrix
|
python
|
def view_similarity_matrix(self, data=None, labels=None, figsize=None,
filename=None):
if not have_heatmap:
raise Exception("Import dependencies missing for viewing "
"similarity matrix. You must have seaborn and "
"scikit-learn")
if data is None and self.activation_map is None:
self.get_surface_state()
if data is None:
X = self.activation_map
else:
X = data
# Calculate the pairwise correlations as a metric for similarity
corrmat = 1 - pairwise_distances(X, metric="correlation")
# Set up the matplotlib figure
if figsize is None:
figsize = (12, 9)
f, ax = plt.subplots(figsize=figsize)
# Y axis has inverted labels (seaborn default, no idea why)
if labels is None:
xticklabels = []
yticklabels = []
else:
xticklabels = labels
yticklabels = labels
# Draw the heatmap using seaborn
sns.heatmap(corrmat, vmax=1, vmin=-1, square=True,
xticklabels=xticklabels, yticklabels=yticklabels,
cmap="RdBu_r", center=0)
f.tight_layout()
# This sets the ticks to a readable angle
plt.yticks(rotation=0)
plt.xticks(rotation=90)
# This sets the labels for the two axes
ax.set_yticklabels(yticklabels, ha='right', va='center', size=8)
ax.set_xticklabels(xticklabels, ha='center', va='top', size=8)
# Save and close the figure
if filename is not None:
plt.savefig(filename, bbox_inches='tight')
else:
plt.show()
return plt
|
Plot the similarity map according to the activation map
:param data: Optional parameter for data points to calculate the
similarity with
:type data: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/somoclu/train.py#L599-L660
|
[
"def get_surface_state(self, data=None):\n \"\"\"Return the Euclidean distance between codebook and data.\n\n :param data: Optional parameter to specify data, otherwise the\n data used previously to train the SOM is used.\n :type data: 2D numpy.array of float32.\n\n :returns: The the dot product of the codebook and the data.\n :rtype: 2D numpy.array\n \"\"\"\n\n if data is None:\n d = self._data\n else:\n d = data\n\n codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])\n parts = np.array_split(d, 200, axis=0)\n am = np.empty((0, (self._n_columns * self._n_rows)), dtype=\"float64\")\n for part in parts:\n am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)\n\n if data is None:\n self.activation_map = am\n return am\n"
] |
class Somoclu(object):
"""Class for training and visualizing a self-organizing map.
Attributes:
codebook The codebook of the self-organizing map.
bmus The BMUs corresponding to the data points.
:param n_columns: The number of columns in the map.
:type n_columns: int.
:param n_rows: The number of rows in the map.
:type n_rows: int.
:param initialcodebook: Optional parameter to start the training with a
given codebook.
:type initialcodebook: 2D numpy.array of float32.
:param kerneltype: Optional parameter to specify which kernel to use:
* 0: dense CPU kernel (default)
* 1: dense GPU kernel (if compiled with it)
:type kerneltype: int.
:param maptype: Optional parameter to specify the map topology:
* "planar": Planar map (default)
* "toroid": Toroid map
:type maptype: str.
:param gridtype: Optional parameter to specify the grid form of the nodes:
* "rectangular": rectangular neurons (default)
* "hexagonal": hexagonal neurons
:type gridtype: str.
:param compactsupport: Optional parameter to cut off map updates beyond the
training radius with the Gaussian neighborhood.
Default: True.
:type compactsupport: bool.
:param neighborhood: Optional parameter to specify the neighborhood:
* "gaussian": Gaussian neighborhood (default)
* "bubble": bubble neighborhood function
:type neighborhood: str.
:param std_coeff: Optional parameter to set the coefficient in the Gaussian
neighborhood function exp(-||x-y||^2/(2*(coeff*radius)^2))
Default: 0.5
:type std_coeff: float.
:param initialization: Optional parameter to specify the initalization:
* "random": random weights in the codebook
* "pca": codebook is initialized from the first
subspace spanned by the first two eigenvectors of
the correlation matrix
:type initialization: str.
:param verbose: Optional parameter to specify verbosity (0, 1, or 2).
:type verbose: int.
"""
def __init__(self, n_columns, n_rows, initialcodebook=None,
kerneltype=0, maptype="planar", gridtype="rectangular",
compactsupport=True, neighborhood="gaussian", std_coeff=0.5,
initialization=None, data=None, verbose=0):
"""Constructor for the class.
"""
self._n_columns, self._n_rows = n_columns, n_rows
self._kernel_type = kerneltype
self._map_type = maptype
self._grid_type = gridtype
self._compact_support = compactsupport
self._neighborhood = neighborhood
self._std_coeff = std_coeff
self._verbose = verbose
self._check_parameters()
self.activation_map = None
if initialcodebook is not None and initialization is not None:
raise Exception("An initial codebook is given but initilization"
" is also requested")
self.bmus = None
self.umatrix = np.zeros(n_columns * n_rows, dtype=np.float32)
self.codebook = initialcodebook
if initialization is None or initialization == "random":
self._initialization = "random"
elif initialization == "pca":
self._initialization = "pca"
else:
raise Exception("Unknown initialization method")
self.n_vectors = 0
self.n_dim = 0
self.clusters = None
self._data = None
if data is not None:
print("Warning: passing the data in the constructor is deprecated.")
self.update_data(data)
def load_bmus(self, filename):
"""Load the best matching units from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.bmus = np.loadtxt(filename, comments='%', usecols=(1, 2))
if self.n_vectors != 0 and len(self.bmus) != self.n_vectors:
raise Exception("The number of best matching units does not match "
"the number of data instances")
else:
self.n_vectors = len(self.bmus)
tmp = self.bmus[:, 0].copy()
self.bmus[:, 0] = self.bmus[:, 1].copy()
self.bmus[:, 1] = tmp
if max(self.bmus[:, 0]) > self._n_columns - 1 or \
max(self.bmus[:, 1]) > self._n_rows - 1:
raise Exception("The dimensions of the best matching units do not "
"match that of the map")
def load_umatrix(self, filename):
"""Load the umatrix from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.umatrix = np.loadtxt(filename, comments='%')
if self.umatrix.shape != (self._n_rows, self._n_columns):
raise Exception("The dimensions of the U-matrix do not "
"match that of the map")
def load_codebook(self, filename):
"""Load the codebook from a file to the Somoclu object.
:param filename: The name of the file.
:type filename: str.
"""
self.codebook = np.loadtxt(filename, comments='%')
if self.n_dim == 0:
self.n_dim = self.codebook.shape[1]
if self.codebook.shape != (self._n_rows * self._n_columns,
self.n_dim):
raise Exception("The dimensions of the codebook do not "
"match that of the map")
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
def train(self, data=None, epochs=10, radius0=0, radiusN=1,
radiuscooling="linear",
scale0=0.1, scaleN=0.01, scalecooling="linear"):
"""Train the map on the current data in the Somoclu object.
:param data: Optional parameter to provide training data. It is not
necessary if the data was added via the method
`update_data`.
:type data: 2D numpy.array of float32.
:param epochs: The number of epochs to train the map for.
:type epochs: int.
:param radius0: The initial radius on the map where the update happens
around a best matching unit. Default value of 0 will
trigger a value of min(n_columns, n_rows)/2.
:type radius0: float.
:param radiusN: The radius on the map where the update happens around a
best matching unit in the final epoch. Default: 1.
:type radiusN: float.
:param radiuscooling: The cooling strategy between radius0 and radiusN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:param scale0: The initial learning scale. Default value: 0.1.
:type scale0: float.
:param scaleN: The learning scale in the final epoch. Default: 0.01.
:type scaleN: float.
:param scalecooling: The cooling strategy between scale0 and scaleN:
* "linear": Linear interpolation (default)
* "exponential": Exponential decay
:type scalecooling: str.
"""
_check_cooling_parameters(radiuscooling, scalecooling)
if self._data is None and data is None:
raise Exception("No data was provided!")
elif data is not None:
self.update_data(data)
self._init_codebook()
self.umatrix.shape = (self._n_rows * self._n_columns, )
self.bmus.shape = (self.n_vectors * 2, )
wrap_train(np.ravel(self._data), epochs, self._n_columns, self._n_rows,
self.n_dim, self.n_vectors, radius0, radiusN,
radiuscooling, scale0, scaleN, scalecooling,
self._kernel_type, self._map_type, self._grid_type,
self._compact_support, self._neighborhood == "gaussian",
self._std_coeff, self._verbose, self.codebook, self.bmus,
self.umatrix)
self.umatrix.shape = (self._n_rows, self._n_columns)
self.bmus.shape = (self.n_vectors, 2)
self.codebook.shape = (self._n_rows, self._n_columns, self.n_dim)
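    # A hedged sketch of a two-stage schedule (illustrative values, not from
    # the original source): a coarse pass with the default wide radius,
    # followed by a fine-tuning pass on the same map.
    #
    #     som.train(data, epochs=10)                  # radius0 defaults to
    #                                                 # min(n_columns, n_rows) / 2
    #     som.train(epochs=50, radius0=3, radiusN=1,
    #               scale0=0.02, scaleN=0.005)        # fine-tuning pass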
def update_data(self, data):
"""Change the data set in the Somoclu object. It is useful when the
data is updated and the training should continue on the new data.
:param data: The training data.
:type data: 2D numpy.array of float32.
"""
oldn_dim = self.n_dim
if data.dtype != np.float32:
print("Warning: data was not float32. A 32-bit copy was made")
self._data = np.float32(data)
else:
self._data = data
self.n_vectors, self.n_dim = data.shape
if self.n_dim != oldn_dim and oldn_dim != 0:
raise Exception("The dimension of the new data does not match!")
self.bmus = np.zeros(self.n_vectors * 2, dtype=np.intc)
def view_component_planes(self, dimensions=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Observe the component planes in the codebook of the SOM.
:param dimensions: Optional parameter to specify along which dimension
or dimensions should the plotting happen. By
default, each dimension is plotted in a sequence of
plots.
        :type dimensions: int or list of int.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.codebook is None:
raise Exception("The codebook is not available. Either train a map"
" or load a codebook from a file")
if dimensions is None:
dimensions = range(self.n_dim)
for i in dimensions:
plt = self._view_matrix(self.codebook[:, :, i], figsize, colormap,
colorbar, bestmatches, bestmatchcolors,
labels, zoom, filename)
return plt
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r,
colorbar=False, bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the U-matrix of the trained map.
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if self.umatrix is None:
raise Exception("The U-matrix is not available. Either train a map"
" or load a U-matrix from a file")
return self._view_matrix(self.umatrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def view_activation_map(self, data_vector=None, data_index=None,
activation_map=None, figsize=None,
colormap=cm.Spectral_r, colorbar=False,
bestmatches=False, bestmatchcolors=None,
labels=None, zoom=None, filename=None):
"""Plot the activation map of a given data instance or a new data
vector
:param data_vector: Optional parameter for a new vector
:type data_vector: numpy.array
:param data_index: Optional parameter for the index of the data instance
:type data_index: int.
:param activation_map: Optional parameter to pass the an activation map
:type activation_map: numpy.array
:param figsize: Optional parameter to specify the size of the figure.
:type figsize: (int, int)
:param colormap: Optional parameter to specify the color map to be
used.
:type colormap: matplotlib.colors.Colormap
:param colorbar: Optional parameter to include a colormap as legend.
:type colorbar: bool.
:param bestmatches: Optional parameter to plot best matching units.
:type bestmatches: bool.
:param bestmatchcolors: Optional parameter to specify the color of each
best matching unit.
:type bestmatchcolors: list of int.
:param labels: Optional parameter to specify the label of each point.
:type labels: list of str.
:param zoom: Optional parameter to zoom into a region on the map. The
first two coordinates of the tuple are the row limits, the
second tuple contains the column limits.
:type zoom: ((int, int), (int, int))
:param filename: If specified, the plot will not be shown but saved to
this file.
:type filename: str.
"""
if data_vector is None and data_index is None:
raise Exception("Either specify a vector to see its activation "
"or give an index of the training data instances")
if data_vector is not None and data_index is not None:
raise Exception("You cannot specify both a data vector and the "
"index of a training data instance")
if data_vector is not None and activation_map is not None:
raise Exception("You cannot pass a previously computated"
"activation map with a data vector")
if data_vector is not None:
try:
d1, _ = data_vector.shape
w = data_vector.copy()
except ValueError:
                d1 = data_vector.shape[0]  # a 1-D vector: shape is (n,)
w = data_vector.reshape(1, d1)
if w.shape[1] == 1:
w = w.T
matrix = cdist(self.codebook.reshape((self.codebook.shape[0] *
self.codebook.shape[1],
self.codebook.shape[2])),
w, 'euclidean').T
matrix.shape = (self.codebook.shape[0], self.codebook.shape[1])
else:
if activation_map is None and self.activation_map is None:
self.get_surface_state()
if activation_map is None:
activation_map = self.activation_map
matrix = activation_map[data_index].reshape((self.codebook.shape[0],
self.codebook.shape[1]))
return self._view_matrix(matrix, figsize, colormap, colorbar,
bestmatches, bestmatchcolors, labels, zoom,
filename)
def _view_matrix(self, matrix, figsize, colormap, colorbar, bestmatches,
bestmatchcolors, labels, zoom, filename):
"""Internal function to plot a map with best matching units and labels.
"""
if zoom is None:
zoom = ((0, self._n_rows), (0, self._n_columns))
if figsize is None:
figsize = (8, 8 / float(zoom[1][1] / zoom[0][1]))
fig = plt.figure(figsize=figsize)
if self._grid_type == "hexagonal":
offsets = _hexplot(matrix[zoom[0][0]:zoom[0][1],
zoom[1][0]:zoom[1][1]], fig, colormap)
filtered_bmus = self._filter_array(self.bmus, zoom)
filtered_bmus[:, 0] = filtered_bmus[:, 0] - zoom[1][0]
filtered_bmus[:, 1] = filtered_bmus[:, 1] - zoom[0][0]
bmu_coords = np.zeros(filtered_bmus.shape)
for i, (row, col) in enumerate(filtered_bmus):
bmu_coords[i] = offsets[col * zoom[1][1] + row]
else:
plt.imshow(matrix[zoom[0][0]:zoom[0][1], zoom[1][0]:zoom[1][1]],
aspect='auto', interpolation='bicubic')
plt.set_cmap(colormap)
bmu_coords = self._filter_array(self.bmus, zoom)
bmu_coords[:, 0] = bmu_coords[:, 0] - zoom[1][0]
bmu_coords[:, 1] = bmu_coords[:, 1] - zoom[0][0]
if colorbar:
cmap = cm.ScalarMappable(cmap=colormap)
cmap.set_array(matrix)
plt.colorbar(cmap, orientation='horizontal', shrink=0.5)
if bestmatches:
if bestmatchcolors is None:
if self.clusters is None:
colors = "white"
else:
colors = []
for bm in self.bmus:
colors.append(self.clusters[bm[1], bm[0]])
colors = self._filter_array(colors, zoom)
else:
colors = self._filter_array(bestmatchcolors, zoom)
plt.scatter(bmu_coords[:, 0], bmu_coords[:, 1], c=colors)
if labels is not None:
for label, col, row in zip(self._filter_array(labels, zoom),
bmu_coords[:, 0], bmu_coords[:, 1]):
if label is not None:
plt.annotate(label, xy=(col, row), xytext=(10, -5),
textcoords='offset points', ha='left',
va='bottom',
bbox=dict(boxstyle='round,pad=0.3',
fc='white', alpha=0.8))
plt.axis('off')
if filename is not None:
plt.savefig(filename)
else:
plt.show()
return plt
def _filter_array(self, a, zoom):
filtered_array = []
for index, bmu in enumerate(self.bmus):
if bmu[0] >= zoom[1][0] and bmu[0] < zoom[1][1] and \
bmu[1] >= zoom[0][0] and bmu[1] < zoom[0][1]:
filtered_array.append(a[index])
return np.array(filtered_array)
def _check_parameters(self):
"""Internal function to verify the basic parameters of the SOM.
"""
if self._map_type != "planar" and self._map_type != "toroid":
raise Exception("Invalid parameter for _map_type: " +
self._map_type)
if self._grid_type != "rectangular" and self._grid_type != "hexagonal":
raise Exception("Invalid parameter for _grid_type: " +
self._grid_type)
if self._neighborhood != "gaussian" and self._neighborhood != "bubble":
raise Exception("Invalid parameter for neighborhood: " +
self._neighborhood)
if self._kernel_type != 0 and self._kernel_type != 1:
raise Exception("Invalid parameter for kernelTye: " +
self._kernel_type)
if self._verbose < 0 and self._verbose > 2:
raise Exception("Invalid parameter for verbose: " +
self._kernel_type)
def _pca_init(self):
try:
from sklearn.decomposition import PCA
pca = PCA(n_components=2, svd_solver="randomized")
        except (ImportError, TypeError):  # older scikit-learn without svd_solver
from sklearn.decomposition import RandomizedPCA
pca = RandomizedPCA(n_components=2)
coord = np.zeros((self._n_columns * self._n_rows, 2))
for i in range(self._n_columns * self._n_rows):
coord[i, 0] = int(i / self._n_columns)
coord[i, 1] = int(i % self._n_columns)
coord = coord / [self._n_rows - 1, self._n_columns - 1]
coord = (coord - .5) * 2
me = np.mean(self._data, 0)
self.codebook = np.tile(me, (self._n_columns * self._n_rows, 1))
pca.fit(self._data - me)
eigvec = pca.components_
eigval = pca.explained_variance_
norms = np.linalg.norm(eigvec, axis=1)
eigvec = ((eigvec.T / norms) * eigval).T
for j in range(self._n_columns * self._n_rows):
for i in range(eigvec.shape[0]):
self.codebook[j, :] = self.codebook[j, :] + \
coord[j, i] * eigvec[i, :]
def _init_codebook(self):
"""Internal function to set the codebook or to indicate it to the C++
code that it should be randomly initialized.
"""
codebook_size = self._n_columns * self._n_rows * self.n_dim
if self.codebook is None:
if self._initialization == "random":
self.codebook = np.zeros(codebook_size, dtype=np.float32)
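                # sentinel values: flag telling the C++ core to randomize this codebook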
self.codebook[0:2] = [1000, 2000]
else:
self._pca_init()
elif self.codebook.size != codebook_size:
raise Exception("Invalid size for initial codebook")
else:
if self.codebook.dtype != np.float32:
print("Warning: initialcodebook was not float32. A 32-bit "
"copy was made")
self.codebook = np.float32(self.codebook)
self.codebook.shape = (codebook_size, )
def cluster(self, algorithm=None):
"""Cluster the codebook. The clusters of the data instances can be
assigned based on the BMUs. The method populates the class variable
Somoclu.clusters. If viewing methods are called after clustering, but
without colors for best matching units, colors will be automatically
assigned based on cluster membership.
:param algorithm: Optional parameter to specify a scikit-learn
clustering algorithm. The default is K-means with
eight clusters.
        :type algorithm: sklearn.base.ClusterMixin.
"""
import sklearn.base
if algorithm is None:
import sklearn.cluster
algorithm = sklearn.cluster.KMeans()
elif not isinstance(algorithm, sklearn.base.ClusterMixin):
raise Exception("Cannot use algorithm of type " + type(algorithm))
original_shape = self.codebook.shape
self.codebook.shape = (self._n_columns * self._n_rows, self.n_dim)
linear_clusters = algorithm.fit_predict(self.codebook)
self.codebook.shape = original_shape
self.clusters = np.zeros((self._n_rows, self._n_columns), dtype=int)
for i, c in enumerate(linear_clusters):
self.clusters[i // self._n_columns, i % self._n_columns] = c
def get_surface_state(self, data=None):
"""Return the Euclidean distance between codebook and data.
:param data: Optional parameter to specify data, otherwise the
data used previously to train the SOM is used.
:type data: 2D numpy.array of float32.
        :returns: The Euclidean distances between the data and the codebook.
:rtype: 2D numpy.array
"""
if data is None:
d = self._data
else:
d = data
codebookReshaped = self.codebook.reshape(self.codebook.shape[0] * self.codebook.shape[1], self.codebook.shape[2])
parts = np.array_split(d, 200, axis=0)
am = np.empty((0, (self._n_columns * self._n_rows)), dtype="float64")
for part in parts:
am = np.concatenate((am, (cdist((part), codebookReshaped, 'euclidean'))), axis=0)
if data is None:
self.activation_map = am
return am
def get_bmus(self, activation_map):
"""Returns Best Matching Units indexes of the activation map.
:param activation_map: Activation map computed with self.get_surface_state()
:type activation_map: 2D numpy.array
:returns: The bmus indexes corresponding to this activation map
(same as self.bmus for the training samples).
:rtype: 2D numpy.array
"""
Y, X = np.unravel_index(activation_map.argmin(axis=1),
(self._n_rows, self._n_columns))
return np.vstack((X, Y)).T
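A short sketch, assuming the map above has been trained, of how get_surface_state and get_bmus combine to place held-out samples on the grid; som and heldout are hypothetical:
am = som.get_surface_state(heldout)   # Euclidean distance to every map node
bmus = som.get_bmus(am)               # argmin per row, unraveled to (x, y)
for (x, y) in bmus:
    print("sample maps to column %d, row %d" % (x, y))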
|
peterwittek/somoclu
|
src/Python/setup.py
|
customize_compiler_for_nvcc
|
python
|
def customize_compiler_for_nvcc(self):
'''This is a verbatim copy of the NVCC compiler extension from
https://github.com/rmcgibbo/npcuda-example
'''
self.src_extensions.append('.cu')
default_compiler_so = self.compiler_so
super = self._compile
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
self.set_executable('compiler_so', CUDA['nvcc'])
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['cc']
super(obj, src, ext, cc_args, postargs, pp_opts)
self.compiler_so = default_compiler_so
self._compile = _compile
|
This is a verbatim copy of the NVCC compiler extension from
https://github.com/rmcgibbo/npcuda-example
|
train
|
https://github.com/peterwittek/somoclu/blob/b31dfbeba6765e64aedddcf8259626d6684f5349/src/Python/setup.py#L56-L73
| null |
#!/usr/bin/env python
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import numpy
import os
import sys
import platform
import traceback
win_cuda_dir = None
def find_cuda():
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = os.path.join(home, 'bin', 'nvcc')
else:
nvcc = None
for dir in os.environ['PATH'].split(os.pathsep):
binpath = os.path.join(dir, 'nvcc')
if os.path.exists(binpath):
nvcc = os.path.abspath(binpath)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be located in '
                               'your $PATH. Either add it to your path, or '
'set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home': home, 'nvcc': nvcc,
'include': os.path.join(home, 'include')}
for k, v in cudaconfig.items():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in '
'%s' % (k, v))
libdir = os.path.join(home, 'lib')
arch = int(platform.architecture()[0][0:2])
if sys.platform.startswith('win'):
os.path.join(libdir, "x"+str(arch))
if os.path.exists(os.path.join(home, libdir + "64")):
cudaconfig['lib'] = libdir + "64"
elif os.path.exists(os.path.join(home, libdir)):
cudaconfig['lib'] = libdir
else:
raise EnvironmentError('The CUDA libraries could not be located')
return cudaconfig
try:
CUDA = find_cuda()
except EnvironmentError:
CUDA = None
print("Proceeding without CUDA")
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
# run the customize_compiler
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
cmdclass = {}
if sys.platform.startswith('win') and win_cuda_dir is not None:
if win_cuda_dir == "":
if 'CUDA_PATH' in os.environ:
win_cuda_dir = os.environ['CUDA_PATH']
elif os.path.exists(win_cuda_dir):
pass
else:
win_cuda_dir = None
if win_cuda_dir:
arch = int(platform.architecture()[0][0:2])
somoclu_module = Extension('_somoclu_wrap',
sources=['somoclu/somoclu_wrap.cxx'],
extra_objects=[
'somoclu/src/denseCpuKernels.obj',
'somoclu/src/sparseCpuKernels.obj',
'somoclu/src/training.obj',
'somoclu/src/mapDistanceFunctions.obj',
'somoclu/src/uMatrix.obj',
'somoclu/src/denseGpuKernels.cu.obj'],
define_macros=[('CUDA', None)],
library_dirs=[win_cuda_dir+"/lib/x"+str(arch)],
libraries=['cudart', 'cublas'],
include_dirs=[numpy_include])
else:
sources_files = ['somoclu/src/denseCpuKernels.cpp',
'somoclu/src/sparseCpuKernels.cpp',
'somoclu/src/mapDistanceFunctions.cpp',
'somoclu/src/training.cpp',
'somoclu/src/uMatrix.cpp',
'somoclu/somoclu_wrap.cxx']
if sys.platform.startswith('win'):
extra_compile_args = ['-openmp']
cmdclass = {}
somoclu_module = Extension('_somoclu_wrap',
sources=sources_files,
include_dirs=[numpy_include, 'src'],
extra_compile_args=extra_compile_args,
)
else:
extra_compile_args = ['-fopenmp']
if 'CC' in os.environ and 'clang-omp' in os.environ['CC']:
openmp = 'iomp5'
else:
openmp = 'gomp'
cmdclass = {'build_ext': custom_build_ext}
somoclu_module = Extension('_somoclu_wrap',
sources=sources_files,
include_dirs=[numpy_include, 'src'],
extra_compile_args={'cc': extra_compile_args},
libraries=[openmp],
)
if CUDA is not None:
somoclu_module.sources.append('somoclu/src/denseGpuKernels.cu')
somoclu_module.define_macros = [('CUDA', None)]
somoclu_module.include_dirs.append(CUDA['include'])
somoclu_module.library_dirs = [CUDA['lib']]
somoclu_module.libraries += ['cudart', 'cublas']
somoclu_module.runtime_library_dirs = [CUDA['lib']]
    somoclu_module.extra_compile_args['nvcc'] = ['-use_fast_math',
                                                 '--ptxas-options=-v', '-c',
                                                 '--compiler-options',
                                                 '-fPIC ' + extra_compile_args[0]]
try:
setup(name='somoclu',
version='1.7.5',
license='GPL3',
author="Peter Wittek, Shi Chao Gao",
author_email="",
maintainer="shichaogao",
maintainer_email="xgdgsc@gmail.com",
url="https://somoclu.readthedocs.io/",
platforms=["unix", "windows"],
description="Massively parallel implementation of self-organizing maps",
ext_modules=[somoclu_module],
packages=["somoclu"],
install_requires=['numpy', 'matplotlib', 'scipy'],
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: C++'
],
cmdclass=cmdclass,
test_suite="tests"
)
except Exception:
traceback.print_exc()
setup(name='somoclu',
version='1.7.5',
license='GPL3',
author="Peter Wittek, Shi Chao Gao",
author_email="",
maintainer="shichaogao",
maintainer_email="xgdgsc@gmail.com",
url="https://somoclu.readthedocs.io/",
platforms=["unix", "windows"],
description="Massively parallel implementation of self-organizing maps",
packages=["somoclu"],
classifiers=[
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Scientific/Engineering :: Visualization',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: C++'
],
install_requires=['numpy', 'matplotlib', 'scipy'],
test_suite="tests"
)
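For illustration only, a self-contained sketch of the same compile-dispatch pattern the build uses; the class name and nvcc path are hypothetical, and a unix-style distutils compiler is assumed:
import os
from setuptools.command.build_ext import build_ext

class cuda_build_ext(build_ext):
    def build_extensions(self):
        compiler = self.compiler
        default_compiler_so = compiler.compiler_so
        default_compile = compiler._compile

        def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
            # Route .cu sources to nvcc, then restore the default compiler
            # so the next source file starts from a clean state.
            if os.path.splitext(src)[1] == '.cu':
                compiler.set_executable('compiler_so', '/usr/local/cuda/bin/nvcc')
            default_compile(obj, src, ext, cc_args, extra_postargs, pp_opts)
            compiler.compiler_so = default_compiler_so

        compiler._compile = _compile
        build_ext.build_extensions(self)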
|
rodricios/eatiht
|
eatiht/v2.py
|
get_html_tree
|
python
|
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
    except AttributeError:
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
    except ValueError:
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
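A hedged usage sketch (URL and file name are made up): the function accepts a URL, a file-like object, or raw HTML bytes, and always hands back an lxml tree:
tree = get_html_tree('https://example.com/article.html')  # URL branch
with open('article.html', 'rb') as f:
    tree = get_html_tree(f)                               # file-like branch
print(tree.getroot().tag)                                 # 'html'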
|
From some file path, input stream, or URL, construct and return
an HTML tree.
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L113-L158
| null |
"""eatiht - Extract Article Text In HyperText documents
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one less for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to the eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves that comprised of text) towards the top. The
boosting score was in proportion to a rough estimate on the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of the two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens post-ABSL-AABSL calculations; for each
subtree, we use the subtree's total subtree string (TSL) length as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyway. The resulting histogram has been shown to have fewer overall buckets.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely to not be a part of resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
#from lxml.html.clean import Cleaner TODO!
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::em or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
    except AttributeError:
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
    except ValueError:
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
# same as v1
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.split('/') for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
parentpaths = ['/'.join(p[:-1]) for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
# TODO: rename these funcs to something that makes more sense
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for _, tnodes in pars_tnodes:
ttl += tnodes[3] # index #3 holds the avg strlen
crd = len(pars_tnodes)
avg = ttl/crd
    if dbg:
        print(avg)
return (avg, ttl, crd)
# TODO: rename these funcs to something that makes more sense
def calc_across_paths_textnodes(paths_nodes, dbg=False):
"""Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
"""
# for (path, [textnodes],
# num. of tnodes,
# ttl strlen across tnodes,
# avg strlen across tnodes.])
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
        ttl = sum([len(s) for s in path_nodes[1][0]])  # total strlen of this subtree's texts
        path_nodes[1][1] = cnt        # cardinality
        path_nodes[1][2] = ttl        # total
        path_nodes[1][3] = ttl / cnt  # average
if dbg:
print(path_nodes[1])
# TODO: consider changing this name to "get_textnode_subtrees"
# see etv2.
def get_parent_xpaths_and_textnodes(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
    attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored-out, variable names are slightly different. This function
    does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
"""
html_tree = get_html_tree(filename_url_or_filelike)
xpath_finder = html_tree.getroot().getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# read note 5
parentpaths_textnodes = [
(xpath_finder(n),
[n.xpath('.//text()'), # list of text from textnode
0, # number of texts (cardinality)
0, # total string length in list of texts
0]) # average string length
for n in nodes_with_text
]
    if len(parentpaths_textnodes) == 0:
        raise Exception("No text nodes satisfied the xpath:\n\n" +
                        xpath_to_text + "\n\nThis can be due to the user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
def extract_more(filename_url_or_filelike):
"""Does what etv2.extract does, but returns not only the text, but also
some of the structures that were built along the way:
results = extract_more(filename_url_or_filelike)
results[0] # extracted text
results[1] # frequency distribution (histogram)
results[2] # subtrees (list of textnodes pre-filter)
results[3] # pruned subtrees
    results[4] # list of paragraphs (as separated in the original website)
    This may provide some insight into how the algorithm works, without
    having to read the documentation in detail.
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_paras = [' '.join(tnode[0]) for tnode in target_tnodes]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return (target_text, hist, target_tnodes, filtered, target_paras)
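A short, hedged sketch of the two public entry points (the URL is hypothetical):
text = extract('https://example.com/some-article')   # extracted body text only
text, hist, tnodes, pruned, paras = extract_more('https://example.com/some-article')
print(hist[0])    # (most common parent xpath, its frequency)
print(paras[0])   # first extracted paragraph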
|
rodricios/eatiht
|
eatiht/v2.py
|
get_xpath_frequencydistribution
|
python
|
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.split('/') for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
parentpaths = ['/'.join(p[:-1]) for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
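A tiny worked example, with made-up xpaths, of what the function computes:
paths = ['/html/body/div/p', '/html/body/div/p', '/html/body/aside/p']
# parent paths: '/html/body/div' twice, '/html/body/aside' once
print(get_xpath_frequencydistribution(paths))
# [('/html/body/div', 2), ('/html/body/aside', 1)]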
|
Build and return a frequency distribution over xpath occurrences.
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L162-L173
| null |
"""eatiht - Extract Article Text In HyperText documents
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one less for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to the eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves that comprised of text) towards the top. The
boosting score was in proportion to a rough estimate on the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of the two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens post-ABSL-AABSL calculations; for each
subtree, we use the subtree's total subtree string (TSL) length as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyway. The resulting histogram has been shown to have fewer overall buckets.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely to not be a part of resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
#from lxml.html.clean import Cleaner TODO!
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::em or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
    except AttributeError:
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
    except ValueError:
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
# same as v1
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.split('/') for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
parentpaths = ['/'.join(p[:-1]) for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
# TODO: rename these funcs to something that makes more sense
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for _, tnodes in pars_tnodes:
ttl += tnodes[3] # index #3 holds the avg strlen
crd = len(pars_tnodes)
avg = ttl/crd
    if dbg:
        print(avg)
return (avg, ttl, crd)
# TODO: rename these funcs to something that makes more sense
def calc_across_paths_textnodes(paths_nodes, dbg=False):
"""Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
"""
# for (path, [textnodes],
# num. of tnodes,
# ttl strlen across tnodes,
# avg strlen across tnodes.])
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
        ttl = sum([len(s) for s in path_nodes[1][0]])  # total strlen of this subtree's texts
        path_nodes[1][1] = cnt        # cardinality
        path_nodes[1][2] = ttl        # total
        path_nodes[1][3] = ttl / cnt  # average
if dbg:
print(path_nodes[1])
# TODO: consider changing this name to "get_textnode_subtrees"
# see etv2.
def get_parent_xpaths_and_textnodes(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
    attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored-out, variable names are slightly different. This function
    does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
"""
html_tree = get_html_tree(filename_url_or_filelike)
xpath_finder = html_tree.getroot().getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# read note 5
parentpaths_textnodes = [
(xpath_finder(n),
[n.xpath('.//text()'), # list of text from textnode
0, # number of texts (cardinality)
0, # total string length in list of texts
0]) # average string length
for n in nodes_with_text
]
    if len(parentpaths_textnodes) == 0:
        raise Exception("No text nodes satisfied the xpath:\n\n" +
                        xpath_to_text + "\n\nThis can be due to the user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
def extract_more(filename_url_or_filelike):
"""Does what etv2.extract does, but returns not only the text, but also
some of the structures that were built along the way:
results = extract_more(filename_url_or_filelike)
results[0] # extracted text
results[1] # frequency distribution (histogram)
results[2] # subtrees (list of textnodes pre-filter)
results[3] # pruned subtrees
    results[4] # list of paragraphs (as separated in the original website)
    This may provide some insight into how the algorithm works, without
    having to read the documentation in detail.
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_paras = [' '.join(tnode[0]) for tnode in target_tnodes]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return (target_text, hist, target_tnodes, filtered, target_paras)
|
rodricios/eatiht
|
eatiht/v2.py
|
calc_avgstrlen_pathstextnodes
|
python
|
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for _, tnodes in pars_tnodes:
ttl += tnodes[3] # index #3 holds the avg strlen
crd = len(pars_tnodes)
avg = ttl / crd
if dbg:
print(avg)
return (avg, ttl, crd)
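A usage sketch with hypothetical data: the function expects a list of (parent_xpath, features) pairs whose index-3 slot already holds each subtree's average branch string length (ABSL), as filled in by calc_across_paths_textnodes, and it returns the average, total, and count of those values:

pars_tnodes = [
    ('/html/body/div[1]', [['short text'], 1, 10, 10.0]),
    ('/html/body/div[2]', [['a much longer paragraph of text'], 1, 31, 31.0]),
]
avg, ttl, crd = calc_avgstrlen_pathstextnodes(pars_tnodes)
print(avg)  # 20.5 -- average of the per-subtree ABSL values (the "AABSL")
print(ttl)  # 41.0 -- accumulated total of the ABSL values
print(crd)  # 2    -- number of subtrees (cardinality)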
|
In an effort to avoid using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L177-L190
| null |
"""eatiht - Extract Article Text In HyperText documents
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one less for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to the eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves that consisted of text) towards the top. The
boosting score was in proportion to a rough estimate on the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens post-ABSL-AABSL calculations; for each
subtree, we use the subtree's total subtree string (TSL) length as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyways. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
#from lxml.html.clean import Cleaner TODO!
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::em or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
# same as v1
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.split('/') for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
parentpaths = ['/'.join(p[:-1]) for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
# TODO: rename these funcs to something that makes more sense
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for _, tnodes in pars_tnodes:
ttl += tnodes[3] # index #3 holds the avg strlen
crd = len(pars_tnodes)
avg = ttl / crd
if dbg:
print(avg)
return (avg, ttl, crd)
# TODO: rename these funcs to something that makes more sense
def calc_across_paths_textnodes(paths_nodes, dbg=False):
"""Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
"""
# for (path, [textnodes],
# num. of tnodes,
# ttl strlen across tnodes,
# avg strlen across tnodes.])
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
ttl = sum(len(s) for s in path_nodes[1][0]) # calculate total
path_nodes[1][1] = cnt # cardinality
path_nodes[1][2] = ttl # total
path_nodes[1][3] = ttl / cnt # average
if dbg:
print(path_nodes[1])
# TODO: consider changing this name to "get_textnode_subtrees"
# see etv2.
def get_parent_xpaths_and_textnodes(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'.
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored out, variable names are slightly different. This function
does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
"""
html_tree = get_html_tree(filename_url_or_filelike)
xpath_finder = html_tree.getroot().getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# read note 5
parentpaths_textnodes = [
(xpath_finder(n),
[n.xpath('.//text()'), # list of text from textnode
0, # number of texts (cardinality)
0, # total string length in list of texts
0]) # average string length
for n in nodes_with_text
]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
def extract_more(filename_url_or_filelike):
"""Does what etv2.extract does, but returns not only the text, but also
some of the structures that were built along the way:
results = extract_more(filename_url_or_filelike)
results[0] # extracted text
results[1] # frequency distribution (histogram)
results[2] # subtrees (list of textnodes pre-filter)
results[3] # pruned subtrees
results[4] # list of paragraphs (as separated in the original website)
May provide some insight into how this algorithm's result is calculated,
without having to read the documentation in detail.
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_paras = [' '.join(tnode[0]) for tnode in target_tnodes]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return (target_text, hist, target_tnodes, filtered, target_paras)
|
rodricios/eatiht
|
eatiht/v2.py
|
calc_across_paths_textnodes
|
python
|
def calc_across_paths_textnodes(paths_nodes, dbg=False):
"""Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
"""
# for (path, [textnodes],
# num. of tnodes,
# ttl strlen across tnodes,
# avg strlen across tnodes.])
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
ttl = sum(len(s) for s in path_nodes[1][0]) # calculate total
path_nodes[1][1] = cnt # cardinality
path_nodes[1][2] = ttl # total
path_nodes[1][3] = ttl / cnt # average
if dbg:
print(path_nodes[1])
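A short sketch with hypothetical input, showing how the zero-initialized feature slots are filled in place:

paths_nodes = [
    ('/html/body/p', [['Hello world.', 'More text here.'], 0, 0, 0]),
]
calc_across_paths_textnodes(paths_nodes)
# 'Hello world.' is 12 chars and 'More text here.' is 15 chars, so:
print(paths_nodes[0][1][1])  # 2    -- cardinality (number of textnodes)
print(paths_nodes[0][1][2])  # 27   -- total string length
print(paths_nodes[0][1][3])  # 13.5 -- average string length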
|
Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L194-L211
| null |
"""eatiht - Extract Article Text In HyperText documents
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one less for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to the eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves that consisted of text) towards the top. The
boosting score was in proportion to a rough estimate on the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens post-ABSL-AABSL calculations; for each
subtree, we use the subtree's total subtree string (TSL) length as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyways. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
#from lxml.html.clean import Cleaner TODO!
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::em or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
# same as v1
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.split('/') for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
parentpaths = ['/'.join(p[:-1]) for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
# TODO: rename these funcs to something that makes more sense
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for _, tnodes in pars_tnodes:
ttl += tnodes[3] # index #3 holds the avg strlen
crd = len(pars_tnodes)
avg = ttl / crd
if dbg:
print(avg)
return (avg, ttl, crd)
# TODO: rename these funcs to something that makes more sense
def calc_across_paths_textnodes(paths_nodes, dbg=False):
"""Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
"""
# for (path, [textnodes],
# num. of tnodes,
# ttl strlen across tnodes,
# avg strlen across tnodes.])
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
ttl = sum(len(s) for s in path_nodes[1][0]) # calculate total
path_nodes[1][1] = cnt # cardinality
path_nodes[1][2] = ttl # total
path_nodes[1][3] = ttl / cnt # average
if dbg:
print(path_nodes[1])
# TODO: consider changing this name to "get_textnode_subtrees"
# see etv2.
def get_parent_xpaths_and_textnodes(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'.
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored out, variable names are slightly different. This function
does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
"""
html_tree = get_html_tree(filename_url_or_filelike)
xpath_finder = html_tree.getroot().getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# read note 5
parentpaths_textnodes = [
(xpath_finder(n),
[n.xpath('.//text()'), # list of text from textnode
0, # number of texts (cardinality)
0, # total string length in list of texts
0]) # average string length
for n in nodes_with_text
]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
def extract_more(filename_url_or_filelike):
"""Does what etv2.extract does, but returns not only the text, but also
some of the structures that were built along the way:
results = extract_more(filename_url_or_filelike)
results[0] # extracted text
results[1] # frequency distribution (histogram)
results[2] # subtrees (list of textnodes pre-filter)
results[3] # pruned subtrees
results[4] # list of paragraphs (as separated in the original website)
May provide some insight into how this algorithm's result is calculated,
without having to read the documentation in detail.
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_paras = [' '.join(tnode[0]) for tnode in target_tnodes]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return (target_text, hist, target_tnodes, filtered, target_paras)
|
rodricios/eatiht
|
eatiht/v2.py
|
get_parent_xpaths_and_textnodes
|
python
|
def get_parent_xpaths_and_textnodes(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'.
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored out, variable names are slightly different. This function
does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
"""
html_tree = get_html_tree(filename_url_or_filelike)
xpath_finder = html_tree.getroot().getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# read note 5
parentpaths_textnodes = [
(xpath_finder(n),
[n.xpath('.//text()'), # list of text from textnode
0, # number of texts (cardinality)
0, # total string length in list of texts
0]) # average string length
for n in nodes_with_text
]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
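A minimal sketch using made-up markup: a file-like object of raw HTML bytes takes get_html_tree's AttributeError branch, and the three numeric feature slots stay 0 until calc_across_paths_textnodes runs:

from io import BytesIO

page = BytesIO(b"<html><body><div><p>A paragraph with comfortably more "
               b"than twenty characters of text.</p></div></body></html>")
for parent_xpath, features in get_parent_xpaths_and_textnodes(page):
    texts, cardinality, total_len, avg_len = features
    print(parent_xpath, texts)  # e.g. /html/body/div/p ['A paragraph ...']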
|
Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'.
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored out, variable names are slightly different. This function
does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L215-L250
|
[
"def get_html_tree(filename_url_or_filelike):\n \"\"\"From some file path, input stream, or URL, construct and return\n an HTML tree.\n \"\"\"\n try:\n handler = (\n HTTPSHandler\n if filename_url_or_filelike.lower().startswith('https')\n else HTTPHandler\n )\n cj = CookieJar()\n opener = build_opener(handler)\n opener.add_handler(HTTPCookieProcessor(cj))\n\n resp = opener.open(filename_url_or_filelike)\n except(AttributeError):\n content = filename_url_or_filelike.read()\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n except(ValueError):\n content = filename_url_or_filelike\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n\n try:\n content = resp.read()\n finally:\n resp.close()\n\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n"
] |
"""eatiht - Extract Article Text In HyperText documents
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one less for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to the eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves that consisted of text) towards the top. The
boosting score was in proportion to a rough estimate on the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens post-ABSL-AABSL calculations; for each
subtree, we use the subtree's total subtree string (TSL) length as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyways. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
#from lxml.html.clean import Cleaner TODO!
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::em or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
# same as v1
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.split('/') for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
parentpaths = ['/'.join(p[:-1]) for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
# TODO: rename these funcs to something that makes more sense
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for _, tnodes in pars_tnodes:
ttl += tnodes[3] # index #3 holds the avg strlen
crd = len(pars_tnodes)
avg = ttl / crd
if dbg:
print(avg)
return (avg, ttl, crd)
# TODO: rename these funcs to something that makes more sense
def calc_across_paths_textnodes(paths_nodes, dbg=False):
"""Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
"""
# for (path, [textnodes],
# num. of tnodes,
# ttl strlen across tnodes,
# avg strlen across tnodes.])
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
ttl = sum(len(s) for s in path_nodes[1][0]) # calculate total
path_nodes[1][1] = cnt # cardinality
path_nodes[1][2] = ttl # total
path_nodes[1][3] = ttl / cnt # average
if dbg:
print(path_nodes[1])
# TODO: consider changing this name to "get_textnode_subtrees"
# see etv2.
def get_parent_xpaths_and_textnodes(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'.
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored out, variable names are slightly different. This function
does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
"""
html_tree = get_html_tree(filename_url_or_filelike)
xpath_finder = html_tree.getroot().getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# read note 5
parentpaths_textnodes = [
(xpath_finder(n),
[n.xpath('.//text()'), # list of text from textnode
0, # number of texts (cardinality)
0, # total string length in list of texts
0]) # average string length
for n in nodes_with_text
]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
def extract_more(filename_url_or_filelike):
"""Does what etv2.extract does, but returns not only the text, but also
some of the structures that were built along the way:
results = extract_more(filename_url_or_filelike)
results[0] # extracted text
results[1] # frequency distribution (histogram)
results[2] # subtrees (list of textnodes pre-filter)
results[3] # pruned subtrees
results[4] # list of paragraphs (as separated in the original website)
May provide some insight into how this algorithm's result is calculated,
without having to read the documentation in detail.
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_paras = [' '.join(tnode[0]) for tnode in target_tnodes]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return (target_text, hist, target_tnodes, filtered, target_paras)
|
rodricios/eatiht
|
eatiht/v2.py
|
extract
|
python
|
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
|
A more precise algorithm than the original eatiht algorithm
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/v2.py#L253-L275
|
[
"def get_xpath_frequencydistribution(paths):\n \"\"\" Build and return a frequency distribution over xpath occurrences.\"\"\"\n # \"html/body/div/div/text\" -> [ \"html\", \"body\", \"div\", \"div\", \"text\" ]\n splitpaths = [p.split('/') for p in paths]\n\n # get list of \"parentpaths\" by right-stripping off the last xpath-node,\n # effectively getting the parent path\n parentpaths = ['/'.join(p[:-1]) for p in splitpaths]\n\n # build frequency distribution\n parentpaths_counter = Counter(parentpaths)\n return parentpaths_counter.most_common()\n",
"def get_parent_xpaths_and_textnodes(filename_url_or_filelike,\n xpath_to_text=TEXT_FINDER_XPATH):\n \"\"\"Provided a url, path or filelike obj., we construct an html tree,\n and build a list of parent paths and children textnodes & \"feature\"\n tuples.\n The features - descriptive values used for gathering statistics that\n attempts to describe this artificial environment I've created (parent\n paths and children textnodes) - are initialized to '0'\n\n Modifications of eatiht.get_sentence_xpath_tuples: some code was\n refactored-out, variable names are slightly different. This function\n does wrap the ltml.tree construction, so a file path, file-like\n structure, or URL is required.\n \"\"\"\n html_tree = get_html_tree(filename_url_or_filelike)\n\n xpath_finder = html_tree.getroot().getroottree().getpath\n\n nodes_with_text = html_tree.xpath(xpath_to_text)\n\n # read note 5\n parentpaths_textnodes = [\n (xpath_finder(n),\n [n.xpath('.//text()'), # list of text from textnode\n 0, # number of texts (cardinality)\n 0, # total string length in list of texts\n 0]) # average string length\n for n in nodes_with_text\n ]\n\n if len(parentpaths_textnodes) is 0:\n raise Exception(\"No text nodes satisfied the xpath:\\n\\n\" +\n xpath_to_text + \"\\n\\nThis can be due to user's\" +\n \" custom xpath, min_str_length value, or both\")\n\n return parentpaths_textnodes\n",
"def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):\n \"\"\"In the effort of not using external libraries (like scipy, numpy, etc),\n I've written some harmless code for basic statistical calculations\n \"\"\"\n ttl = 0\n for _, tnodes in pars_tnodes:\n ttl += tnodes[3] # index #3 holds the avg strlen\n\n crd = len(pars_tnodes)\n avg = ttl/crd\n if dbg is True:\n print(avg)\n # avg = ttl/crd\n return (avg, ttl, crd)\n",
"def calc_across_paths_textnodes(paths_nodes, dbg=False):\n \"\"\"Given a list of parent paths tupled with children textnodes, plus\n initialized feature values, we calculate the total and average string\n length of the parent's children textnodes.\n \"\"\"\n\n # for (path, [textnodes],\n # num. of tnodes,\n # ttl strlen across tnodes,\n # avg strlen across tnodes.])\n for path_nodes in paths_nodes:\n cnt = len(path_nodes[1][0])\n ttl = sum([len(s) for s in paths_nodes[1][0]]) # calculate total\n path_nodes[1][1] = cnt # cardinality\n path_nodes[1][2] = ttl # total\n path_nodes[1][3] = ttl/ cnt # average\n if dbg:\n print(path_nodes[1])\n"
] |
"""eatiht - Extract Article Text In HyperText documents
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one less for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to the eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves that consisted of text) towards the top. The
boosting score was in proportion to a rough estimate on the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens post-ABSL-AABSL calculations; for each
subtree, we use the subtree's total subtree string (TSL) length as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyways. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
#from lxml.html.clean import Cleaner TODO!
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::em or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
# same as v1
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.split('/') for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
parentpaths = ['/'.join(p[:-1]) for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
# TODO: rename these funcs to something that makes more sense
def calc_avgstrlen_pathstextnodes(pars_tnodes, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for _, tnodes in pars_tnodes:
ttl += tnodes[3] # index #3 holds the avg strlen
crd = len(pars_tnodes)
avg = ttl / crd
if dbg:
print(avg)
return (avg, ttl, crd)
# TODO: rename these funcs to something that makes more sense
def calc_across_paths_textnodes(paths_nodes, dbg=False):
"""Given a list of parent paths tupled with children textnodes, plus
initialized feature values, we calculate the total and average string
length of the parent's children textnodes.
"""
# for (path, [textnodes],
# num. of tnodes,
# ttl strlen across tnodes,
# avg strlen across tnodes.])
for path_nodes in paths_nodes:
cnt = len(path_nodes[1][0])
ttl = sum(len(s) for s in path_nodes[1][0]) # calculate total
path_nodes[1][1] = cnt # cardinality
path_nodes[1][2] = ttl # total
path_nodes[1][3] = ttl / cnt # average
if dbg:
print(path_nodes[1])
# TODO: consider changing this name to "get_textnode_subtrees"
# see etv2.
def get_parent_xpaths_and_textnodes(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""Provided a url, path or filelike obj., we construct an html tree,
and build a list of parent paths and children textnodes & "feature"
tuples.
The features - descriptive values used for gathering statistics that
attempt to describe this artificial environment I've created (parent
paths and children textnodes) - are initialized to '0'.
Modifications of eatiht.get_sentence_xpath_tuples: some code was
refactored out, variable names are slightly different. This function
does wrap the lxml tree construction, so a file path, file-like
structure, or URL is required.
"""
html_tree = get_html_tree(filename_url_or_filelike)
xpath_finder = html_tree.getroot().getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# read note 5
parentpaths_textnodes = [
(xpath_finder(n),
[n.xpath('.//text()'), # list of text from textnode
0, # number of texts (cardinality)
0, # total string length in list of texts
0]) # average string length
for n in nodes_with_text
]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_or_filelike):
"""A more precise algorithm over the original eatiht algorithm
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
try:
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return target_text
except IndexError:
return ""
def extract_more(filename_url_or_filelike):
"""Does what etv2.extract does, but returns not only the text, but also
some of the structures that were built along the way:
results = extract_more(filename_url_or_filelike)
results[0] # extracted text
results[1] # frequency distribution (histogram)
results[2] # subtrees (list of textnodes pre-filter)
results[3] # pruned subtrees
results[4] # list of paragraphs (as separated in the original website)
May provide some insight into how this algorithm's result is calculated,
without having to read the documentation in detail.
"""
pars_tnodes = get_parent_xpaths_and_textnodes(filename_url_or_filelike)
#[iterable, cardinality, ttl across iterable, avg across iterable.])
calc_across_paths_textnodes(pars_tnodes)
avg, _, _ = calc_avgstrlen_pathstextnodes(pars_tnodes)
filtered = [parpath_tnodes for parpath_tnodes in pars_tnodes
if parpath_tnodes[1][2] > avg]
paths = [path for path, tnode in filtered]
hist = get_xpath_frequencydistribution(paths)
target_tnodes = [tnode for par, tnode in pars_tnodes if hist[0][0] in par]
target_paras = [' '.join(tnode[0]) for tnode in target_tnodes]
target_text = '\n\n'.join([' '.join(tnode[0]) for tnode in target_tnodes])
return (target_text, hist, target_tnodes, filtered, target_paras)
|
rodricios/eatiht
|
eatiht/etv2.py
|
get_xpath_frequencydistribution
|
python
|
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.rsplit('/', 1) for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
# thanks to eugene-eeo for optimization
parentpaths = [p[0] for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
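A small sketch with hypothetical paths, showing how sibling subtrees collapse onto their shared parent and are ranked by frequency:

paths = [
    '/html/body/div/p[1]',
    '/html/body/div/p[2]',
    '/html/body/div/p[3]',
    '/html/body/aside/p',
]
print(get_xpath_frequencydistribution(paths))
# [('/html/body/div', 3), ('/html/body/aside', 1)]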
|
Build and return a frequency distribution over xpath occurrences.
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/etv2.py#L178-L190
| null |
"""eatiht v2 - Rodrigo Palacios - Copyright 2014
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one less for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to the eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves that consisted of text) towards the top. The
boosting score was in proportion to a rough estimate on the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens post-ABSL-AABSL calculations; for each
subtree, we use the subtree's total subtree string (TSL) length as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyways. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
from lxml.html.clean import Cleaner
from .eatiht_trees import TextNodeSubTree, TextNodeTree
# decided to use backslashes for readability?
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# /u/oliver_newton's suggestion made here:
# http://www.reddit.com/r/Python/comments/2pqx2d/just_made_what_i_consider_my_first_algorithm_and/cn0mubp
HTML_CLEANER = Cleaner(scripts=True, javascript=True, comments=True,
style=True, links=True, meta=True, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=True, frames=True, forms=True,
annoying_tags=True,
remove_tags=["a", "i", "em", "b", "strong", "span"],
kill_tags=("noscript", "iframe", "figure"),
remove_unknown_tags=True, safe_attrs_only=True)
# Unfortunately, this cleans up a ton of lines of unnecessary text...
SENTENCE_ENDING = ['.', '"', '?', '!', "'"]
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.rsplit('/', 1) for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
# thanks to eugene-eeo for optimization
parentpaths = [p[0] for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
def calcavg_avgstrlen_subtrees(subtrees, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for subtree in subtrees:
ttl += subtree.avg_strlen
crd = len(subtrees)
avg = ttl/crd
if dbg is True:
print(avg)
# avg = ttl/crd
return (avg, ttl, crd)
def get_textnode_subtrees(html_tree,
xpath_to_text=TEXT_FINDER_XPATH):
"""A modification of get_sentence_xpath_tuples: some code was
refactored out, and variable names are slightly different. This function
does not wrap the lxml tree construction, so an already-parsed
html tree (e.g. from get_html_tree) is required.
"""
try:
xpath_finder = html_tree.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = html_tree.getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# Within the TextNodeSubTree construction, the ABSL is calculated
# refer to eatiht_trees.py
parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n),
n.xpath('.//text()'))
for n in nodes_with_text]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_filelike_or_htmlstring):
"""An "improved" algorithm over the original eatiht algorithm
"""
html_tree = get_html_tree(filename_url_filelike_or_htmlstring)
subtrees = get_textnode_subtrees(html_tree)
# calcavg_avgstrlen_subtrees returns (avg, ttl, cardinality) across the subtrees
# calculate AABSL
avg, _, _ = calcavg_avgstrlen_subtrees(subtrees)
# "high-pass" filter
filtered = [subtree for subtree in subtrees
if subtree.ttl_strlen > avg]
paths = [subtree.parent_path for subtree in filtered]
hist = get_xpath_frequencydistribution(paths)
target_subtrees = [stree for stree in subtrees
if hist[0][0] in stree.parent_path]
title = html_tree.find(".//title")
return TextNodeTree(title.text_content(), target_subtrees, hist)
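To make the ABSL/AABSL walkthrough above concrete, here is a minimal,
self-contained sketch of the same pipeline on invented data (the paths and
string lengths are made up for illustration; only the standard library is used):

# A minimal sketch of the v2 pipeline described above, run on toy data.
from collections import Counter

toy_subtrees = [
    ("/html/body/div[1]/p[1]", [120, 95, 140]),   # article paragraphs
    ("/html/body/div[1]/p[2]", [110, 130]),
    ("/html/body/div[2]/li[1]", [25]),            # nav/boilerplate
    ("/html/body/div[2]/li[2]", [30, 22]),
]

# ABSL: average branch string length per subtree; TSL: total string length
stats = []
for path, lengths in toy_subtrees:
    tsl = sum(lengths)
    absl = tsl / len(lengths)
    stats.append((path, tsl, absl))

# AABSL: the average of all subtrees' ABSL values -- the cutoff threshold
aabsl = sum(absl for _, _, absl in stats) / len(stats)

# "high-pass" filter: keep subtrees whose TSL exceeds the AABSL
kept = [path for path, tsl, _ in stats if tsl > aabsl]

# frequency distribution keyed on the parent xpath (drop the last node)
hist = Counter(p.rsplit("/", 1)[0] for p in kept).most_common()
print(hist[0][0])  # -> '/html/body/div[1]': the likeliest content container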
|
rodricios/eatiht
|
eatiht/etv2.py
|
calcavg_avgstrlen_subtrees
|
python
|
def calcavg_avgstrlen_subtrees(subtrees, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for subtree in subtrees:
ttl += subtree.avg_strlen
crd = len(subtrees)
avg = ttl/crd
if dbg is True:
print(avg)
# avg = ttl/crd
return (avg, ttl, crd)
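A quick usage sketch: the function only assumes objects exposing an
avg_strlen attribute, so a namedtuple can stand in for TextNodeSubTree here
(the values are invented):

from collections import namedtuple

FakeSubtree = namedtuple("FakeSubtree", "avg_strlen")
avg, ttl, crd = calcavg_avgstrlen_subtrees(
    [FakeSubtree(100.0), FakeSubtree(50.0), FakeSubtree(150.0)])
print(avg, ttl, crd)  # -> 100.0 300.0 3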
|
In an effort to avoid external libraries (like scipy, numpy, etc.),
I've written some harmless code for basic statistical calculations
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/etv2.py#L193-L206
| null |
"""eatiht v2 - Rodrigo Palacios - Copyright 2014
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one fewer for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves composed of text) towards the top. The
boosting score was in proportion to a rough estimate of the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens after the ABSL and AABSL calculations; for each
subtree, we use the subtree's total string length (TSL) as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyway. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not to be part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
from lxml.html.clean import Cleaner
from .eatiht_trees import TextNodeSubTree, TextNodeTree
# decided to use backslashes for readability?
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# /u/oliver_newton's suggestion made here:
# http://www.reddit.com/r/Python/comments/2pqx2d/just_made_what_i_consider_my_first_algorithm_and/cn0mubp
HTML_CLEANER = Cleaner(scripts=True, javascript=True, comments=True,
style=True, links=True, meta=True, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=True, frames=True, forms=True,
annoying_tags=True,
remove_tags=["a", "i", "em", "b", "strong", "span"],
kill_tags=("noscript", "iframe", "figure"),
remove_unknown_tags=True, safe_attrs_only=True)
# Unfortunately, this cleans up a ton of lines of unnecessary text...
SENTENCE_ENDING = ['.', '"', '?', '!', "'"]
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.rsplit('/', 1) for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
# thanks to eugene-eeo for optimization
parentpaths = [p[0] for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
def calcavg_avgstrlen_subtrees(subtrees, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for subtree in subtrees:
ttl += subtree.avg_strlen
crd = len(subtrees)
avg = ttl/crd
if dbg is True:
print(avg)
# avg = ttl/crd
return (avg, ttl, crd)
def get_textnode_subtrees(html_tree,
xpath_to_text=TEXT_FINDER_XPATH):
"""A modification of get_sentence_xpath_tuples: some code was
refactored out, and variable names are slightly different. This function
does not wrap the lxml tree construction, so an already-parsed
html tree (e.g. from get_html_tree) is required.
"""
try:
xpath_finder = html_tree.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = html_tree.getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# Within the TextNodeSubTree construction, the ABSL is calculated
# refer to eatiht_trees.py
parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n),
n.xpath('.//text()'))
for n in nodes_with_text]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_filelike_or_htmlstring):
"""An "improved" algorithm over the original eatiht algorithm
"""
html_tree = get_html_tree(filename_url_filelike_or_htmlstring)
subtrees = get_textnode_subtrees(html_tree)
# calcavg_avgstrlen_subtrees returns (avg, ttl, cardinality) across the subtrees
# calculate AABSL
avg, _, _ = calcavg_avgstrlen_subtrees(subtrees)
# "high-pass" filter
filtered = [subtree for subtree in subtrees
if subtree.ttl_strlen > avg]
paths = [subtree.parent_path for subtree in filtered]
hist = get_xpath_frequencydistribution(paths)
target_subtrees = [stree for stree in subtrees
if hist[0][0] in stree.parent_path]
title = html_tree.find(".//title")
return TextNodeTree(title.text_content(), target_subtrees, hist)
|
rodricios/eatiht
|
eatiht/etv2.py
|
get_textnode_subtrees
|
python
|
def get_textnode_subtrees(html_tree,
xpath_to_text=TEXT_FINDER_XPATH):
"""A modification of get_sentence_xpath_tuples: some code was
refactored out, and variable names are slightly different. This function
does not wrap the lxml tree construction, so an already-parsed
html tree (e.g. from get_html_tree) is required.
"""
try:
xpath_finder = html_tree.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = html_tree.getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# Within the TextNodeSubTree construction, the ABSL is calculated
# refer to eatiht_trees.py
parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n),
n.xpath('.//text()'))
for n in nodes_with_text]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
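A hedged usage sketch, assuming the surrounding module (with
TEXT_FINDER_XPATH and TextNodeSubTree) is importable; the page markup is
invented and padded so the text node clears the 20-character xpath threshold:

from lxml import html

page = html.document_fromstring(
    "<html><body><div><p>" + "Long enough paragraph text. " * 5 +
    "</p></div></body></html>")
for st in get_textnode_subtrees(page):
    print(st.parent_path, st.tnodes_cnt, st.ttl_strlen)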
|
A modification of get_sentence_xpath_tuples: some code was
refactored out, and variable names are slightly different. This function
does not wrap the lxml tree construction, so an already-parsed
html tree (e.g. from get_html_tree) is required.
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/etv2.py#L209-L235
| null |
"""eatiht v2 - Rodrigo Palacios - Copyright 2014
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one fewer for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves composed of text) towards the top. The
boosting score was in proportion to a rough estimate of the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens after the ABSL and AABSL calculations; for each
subtree, we use the subtree's total string length (TSL) as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyway. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not to be part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
from lxml.html.clean import Cleaner
from .eatiht_trees import TextNodeSubTree, TextNodeTree
# decided to use backslashes for readability?
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# /u/oliver_newton's suggestion made here:
# http://www.reddit.com/r/Python/comments/2pqx2d/just_made_what_i_consider_my_first_algorithm_and/cn0mubp
HTML_CLEANER = Cleaner(scripts=True, javascript=True, comments=True,
style=True, links=True, meta=True, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=True, frames=True, forms=True,
annoying_tags=True,
remove_tags=["a", "i", "em", "b", "strong", "span"],
kill_tags=("noscript", "iframe", "figure"),
remove_unknown_tags=True, safe_attrs_only=True)
# Unfortunately, this cleans up a ton of lines of unnecessary text...
SENTENCE_ENDING = ['.', '"', '?', '!', "'"]
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.rsplit('/', 1) for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
# thanks to eugene-eeo for optimization
parentpaths = [p[0] for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
def calcavg_avgstrlen_subtrees(subtrees, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for subtree in subtrees:
ttl += subtree.avg_strlen
crd = len(subtrees)
avg = ttl/crd
if dbg is True:
print(avg)
# avg = ttl/crd
return (avg, ttl, crd)
def get_textnode_subtrees(html_tree,
xpath_to_text=TEXT_FINDER_XPATH):
"""A modification of get_sentence_xpath_tuples: some code was
refactored out, and variable names are slightly different. This function
does not wrap the lxml tree construction, so an already-parsed
html tree (e.g. from get_html_tree) is required.
"""
try:
xpath_finder = html_tree.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = html_tree.getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# Within the TextNodeSubTree construction, the ABSL is calculated
# refer to eatiht_trees.py
parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n),
n.xpath('.//text()'))
for n in nodes_with_text]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_filelike_or_htmlstring):
"""An "improved" algorithm over the original eatiht algorithm
"""
html_tree = get_html_tree(filename_url_filelike_or_htmlstring)
subtrees = get_textnode_subtrees(html_tree)
# calcavg_avgstrlen_subtrees returns (avg, ttl, cardinality) across the subtrees
# calculate AABSL
avg, _, _ = calcavg_avgstrlen_subtrees(subtrees)
# "high-pass" filter
filtered = [subtree for subtree in subtrees
if subtree.ttl_strlen > avg]
paths = [subtree.parent_path for subtree in filtered]
hist = get_xpath_frequencydistribution(paths)
target_subtrees = [stree for stree in subtrees
if hist[0][0] in stree.parent_path]
title = html_tree.find(".//title")
return TextNodeTree(title.text_content(), target_subtrees, hist)
|
rodricios/eatiht
|
eatiht/etv2.py
|
extract
|
python
|
def extract(filename_url_filelike_or_htmlstring):
"""An "improved" algorithm over the original eatiht algorithm
"""
html_tree = get_html_tree(filename_url_filelike_or_htmlstring)
subtrees = get_textnode_subtrees(html_tree)
# calcavg_avgstrlen_subtrees returns (avg, ttl, cardinality) across the subtrees
# calculate AABSL
avg, _, _ = calcavg_avgstrlen_subtrees(subtrees)
# "high-pass" filter
filtered = [subtree for subtree in subtrees
if subtree.ttl_strlen > avg]
paths = [subtree.parent_path for subtree in filtered]
hist = get_xpath_frequencydistribution(paths)
target_subtrees = [stree for stree in subtrees
if hist[0][0] in stree.parent_path]
title = html_tree.find(".//title")
return TextNodeTree(title.text_content(), target_subtrees, hist)
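A usage sketch; the import path mirrors the repository layout and the URL is
a placeholder (both are assumptions, not guarantees):

from eatiht import etv2

tree = etv2.extract("https://example.com/some-article.html")
print(tree.title)          # text of the page's <title>
print(tree.content_path)   # winning parent xpath from the histogram
print(tree.get_text()[:200])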
|
An "improved" algorithm over the original eatiht algorithm
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/etv2.py#L238-L262
|
[
"def get_xpath_frequencydistribution(paths):\n \"\"\" Build and return a frequency distribution over xpath occurrences.\"\"\"\n # \"html/body/div/div/text\" -> [ \"html\", \"body\", \"div\", \"div\", \"text\" ]\n splitpaths = [p.rsplit('/', 1) for p in paths]\n\n # get list of \"parentpaths\" by right-stripping off the last xpath-node,\n # effectively getting the parent path\n # thanks to eugene-eeo for optimization\n parentpaths = [p[0] for p in splitpaths]\n\n # build frequency distribution\n parentpaths_counter = Counter(parentpaths)\n return parentpaths_counter.most_common()\n",
"def get_html_tree(filename_url_or_filelike):\n \"\"\"From some file path, input stream, or URL, construct and return\n an HTML tree.\n \"\"\"\n try:\n handler = (\n HTTPSHandler\n if filename_url_or_filelike.lower().startswith('https')\n else HTTPHandler\n )\n cj = CookieJar()\n opener = build_opener(handler)\n opener.add_handler(HTTPCookieProcessor(cj))\n\n resp = opener.open(filename_url_or_filelike)\n except(AttributeError):\n content = filename_url_or_filelike.read()\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n except(ValueError):\n content = filename_url_or_filelike\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n\n try:\n content = resp.read()\n finally:\n resp.close()\n\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n",
"def calcavg_avgstrlen_subtrees(subtrees, dbg=False):\n \"\"\"In the effort of not using external libraries (like scipy, numpy, etc),\n I've written some harmless code for basic statistical calculations\n \"\"\"\n ttl = 0\n for subtree in subtrees:\n ttl += subtree.avg_strlen\n\n crd = len(subtrees)\n avg = ttl/crd\n if dbg is True:\n print(avg)\n # avg = ttl/crd\n return (avg, ttl, crd)\n",
"def get_textnode_subtrees(html_tree,\n xpath_to_text=TEXT_FINDER_XPATH):\n \"\"\"A modification of get_sentence_xpath_tuples: some code was\n refactored-out, variable names are slightly different. This function\n does wrap the ltml.tree construction, so a file path, file-like\n structure, or URL is required.\n \"\"\"\n\n try:\n xpath_finder = html_tree.getroot().getroottree().getpath\n except(AttributeError):\n xpath_finder = html_tree.getroottree().getpath\n\n nodes_with_text = html_tree.xpath(xpath_to_text)\n\n # Within the TextNodeSubTree construction, the ABSL is calculated\n # refer to eatiht_trees.py\n parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n),\n n.xpath('.//text()'))\n for n in nodes_with_text]\n\n if len(parentpaths_textnodes) is 0:\n raise Exception(\"No text nodes satisfied the xpath:\\n\\n\" +\n xpath_to_text + \"\\n\\nThis can be due to user's\" +\n \" custom xpath, min_str_length value, or both\")\n\n return parentpaths_textnodes\n"
] |
"""eatiht v2 - Rodrigo Palacios - Copyright 2014
This version of eatiht v2 is the "script" implementation, where
the result is simply the extracted text; there's also extract_more where
the output is the extracted text plus some of the structures that were
built along the way. Please refer to etv2.py for an almost identical
implementation using the classes declared in eatiht_trees.py; an
explanation of the data structures also exists in that file.
etv2.py has close to the same logic, maybe one fewer for loop,
but its return value is a class structure that tries to express
some highlevel structure with the extracted text, as well as the
html wrapping that extracted text. This class will likely have a more
defined, probably dramatically different representation in later releases.
Written by Rodrigo Palacios - Copyright 2014
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm v2, dammit!:
This algorithm is a small modification to the original. I argue that
it is more "precise" at the cost of extra computations
that may otherwise be unnecessary.
The overall process is similar to v1, but removes the "partitioning"
step (please refer to eatiht.py for details). What the above step
allowed for was a way to artificially boost subtrees (where the root
had branches/leaves composed of text) towards the top. The
boosting score was in proportion to a rough estimate of the # of
sentences in that subtree.
Instead, we rely on the average string length across each branch in a
subtree as one of two vital calculations in this algorithm. Let's
call this a subtree's "avg branch string length" or ABSL for short.
The just-mentioned avg score is stored in a list, along with the original
textnodes (branches), the total string length across the textnodes, and
the number of textnodes (you'll see me sometimes refer to this as the
"cardinal" or "cardinality").
The second decisive calculation is the average across all subtrees'
average branch string length.
Yes, it's an ugly mouthful, but it's a pretty and simple calculation.
We iterate across our list of subtrees, accruing a total of each subtree's
ABSL, and then calculate the average of that, which I'll refer to as
the AStABSL (avg. subtree avg branch str.len.) or AABSL.
The AABSL value serves as a cutoff threshold used during a filtering pass.
This filtering pass happens after the ABSL and AABSL calculations; for each
subtree, we use the subtree's total string length (TSL) as the
value that gets measured against the AABSL value; those subtrees
with TSL values higher than the AABSL are kept for one final processing
step. I basically see this as a high-pass filter.
This last step will be familiar to those who know a bit about how the
first algorithm generated its results. In short, we build a frequency
distribution where the key ("bucket" or "bin" when referring to our
distribution as a histogram) is a subtree's root xpath.
That's basically it. Now to address some differences, and also to address
the claim I made towards the top, that this algorithm is more "precise"
than the previous one.
I'm not sure if "precise" is the correct word to use, but I'll go with
it anyway. The resulting histogram has been shown to have fewer buckets overall.
In other words, in the "high-pass" filtering stage, it prunes out many
subtrees where text is likely not to be part of the resulting "body."
Put simply, and with some AI nomenclature, we shrink our state space
dramatically. In other words, to me,
"smaller state space" === "more precise"
iff "result is the same as previous algorithm"
That may be circular reasoning, faulty logic, what have you. I'm not
classically trained in this sort of thing so I'd appreciate any insight
as to what exactly it is that I'm doing lol.
"""
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
from lxml.html.clean import Cleaner
from .eatiht_trees import TextNodeSubTree, TextNodeTree
# decided to use backslashes for readability?
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::noscript or \
self::style or \
self::i or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# /u/oliver_newton's suggestion made here:
# http://www.reddit.com/r/Python/comments/2pqx2d/just_made_what_i_consider_my_first_algorithm_and/cn0mubp
HTML_CLEANER = Cleaner(scripts=True, javascript=True, comments=True,
style=True, links=True, meta=True, add_nofollow=False,
page_structure=False, processing_instructions=True,
embedded=True, frames=True, forms=True,
annoying_tags=True,
remove_tags=["a", "i", "em", "b", "strong", "span"],
kill_tags=("noscript", "iframe", "figure"),
remove_unknown_tags=True, safe_attrs_only=True)
# Unfortunately, this cleans up a ton of lines of unnecessary text...
SENTENCE_ENDING = ['.', '"', '?', '!', "'"]
# Refactored download and lxml tree instantiation
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
def get_xpath_frequencydistribution(paths):
""" Build and return a frequency distribution over xpath occurrences."""
# "html/body/div/div/text" -> [ "html", "body", "div", "div", "text" ]
splitpaths = [p.rsplit('/', 1) for p in paths]
# get list of "parentpaths" by right-stripping off the last xpath-node,
# effectively getting the parent path
# thanks to eugene-eeo for optimization
parentpaths = [p[0] for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
def calcavg_avgstrlen_subtrees(subtrees, dbg=False):
"""In the effort of not using external libraries (like scipy, numpy, etc),
I've written some harmless code for basic statistical calculations
"""
ttl = 0
for subtree in subtrees:
ttl += subtree.avg_strlen
crd = len(subtrees)
avg = ttl/crd
if dbg is True:
print(avg)
# avg = ttl/crd
return (avg, ttl, crd)
def get_textnode_subtrees(html_tree,
xpath_to_text=TEXT_FINDER_XPATH):
"""A modification of get_sentence_xpath_tuples: some code was
refactored out, and variable names are slightly different. This function
does not wrap the lxml tree construction, so an already-parsed
html tree (e.g. from get_html_tree) is required.
"""
try:
xpath_finder = html_tree.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = html_tree.getroottree().getpath
nodes_with_text = html_tree.xpath(xpath_to_text)
# Within the TextNodeSubTree construction, the ABSL is calculated
# refer to eatiht_trees.py
parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n),
n.xpath('.//text()'))
for n in nodes_with_text]
if len(parentpaths_textnodes) == 0:
raise Exception("No text nodes satisfied the xpath:\n\n" +
xpath_to_text + "\n\nThis can be due to user's" +
" custom xpath, min_str_length value, or both")
return parentpaths_textnodes
def extract(filename_url_filelike_or_htmlstring):
"""An "improved" algorithm over the original eatiht algorithm
"""
html_tree = get_html_tree(filename_url_filelike_or_htmlstring)
subtrees = get_textnode_subtrees(html_tree)
# calcavg_avgstrlen_subtrees returns (avg, ttl, cardinality) across the subtrees
# calculate AABSL
avg, _, _ = calcavg_avgstrlen_subtrees(subtrees)
# "high-pass" filter
filtered = [subtree for subtree in subtrees
if subtree.ttl_strlen > avg]
paths = [subtree.parent_path for subtree in filtered]
hist = get_xpath_frequencydistribution(paths)
target_subtrees = [stree for stree in subtrees
if hist[0][0] in stree.parent_path]
title = html_tree.find(".//title")
return TextNodeTree(title.text_content(), target_subtrees, hist)
|
rodricios/eatiht
|
eatiht/eatiht_trees.py
|
TextNodeSubTree.__learn_oneself
|
python
|
def __learn_oneself(self):
"""calculate cardinality, total and average string length"""
if not self.__parent_path or not self.__text_nodes:
raise Exception("This error occurred because the step constructor\
had insufficient textnodes or it had empty string\
for its parent xpath")
# Iterate through text nodes and sum up text length
# TODO: consider naming this child_count or cardinality
# or branch_cnt
self.tnodes_cnt = len(self.__text_nodes)
# consider naming this total
self.ttl_strlen = sum([len(tnode) for tnode in self.__text_nodes])
# consider naming this average
self.avg_strlen = self.ttl_strlen/self.tnodes_cnt
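A worked example of the three statistics, using three invented text nodes:

text_nodes = ["Hello world", "A longer sentence here.", "Hi"]
tnodes_cnt = len(text_nodes)                  # 3   (cardinality)
ttl_strlen = sum(len(t) for t in text_nodes)  # 11 + 23 + 2 = 36
avg_strlen = ttl_strlen / tnodes_cnt          # 12.0
# Caveat: under Python 2 the division above truncates to 12 unless
# "from __future__ import division" is in effect.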
|
calculate cardinality, total and average string length
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L161-L174
| null |
class TextNodeSubTree(object):
""" This class can be described in a few different ways. A proper
explanation requires a brief definition of terms.
There's two W3C-spec'd conceptual pieces that I make use of:
TEXT_NODE - #text - NodeType 3
------------------
A text node is a W3C node type. Its description is a node that
"represents textual content in an element or attribute."
You'll see me write this as textnode, tnode, tnd(?) in cases where
function names get too long, or I get lazy - rarely will you get see
me refer its named constant TEXT_NODE.
This html element is probably the most intuitive out of all html
elements. For one, it's always a leaf node, meaning it's always(?)
the last node of any branch in the html tree.
One property of the textnode that sort of forces an intuitive, as
opposed to explicit, acknowledgement of the textnode is that it is
usually not visible in web dev. tools. It never dawned on me, until
I used a python parser, that "text" in paragraph (<p>) nodes are
actually nodes themselves!
This diagram shows what I describe above:
When we see this...
...
<p>
Foo
</p>
...
It means this...
...
<p>
<#text>Foo</#text>
</p>
...
Note: don't quote me about the actual #text tag representation I used.
ELEMENT_NODE - p, div, span, etc. - NodeType 1
The second conceptual piece that's required for this class is
what most people have seen out in the wild.
In the context of this algorithm/class, and many other approaches to
text extraction, the element node is the keystone of this data
structure.
Without wasting too many words, as I predict you'll eventually get
bored of these lengthy docstrings, the element node can take the form of a
long list of nodes (tags), except:
script, noscript, style, i, b, strong, span, a (this list is
subject to additions.)
You'll see me refer to this node as elementnode, elemnode, enode.
Now Combining the Two - TEXT_NODE and ELEMENT_NODE
--------------------------------------------------
In essence, this algorithm captures the natural parent-children
structure already present in any given html tree. The parent
is an ELEMENT_NODE, the children are TEXT_NODES.
In one of the first steps of the algorithm, we try to filter out,
or prune, subtrees that have text, but exist *under* a parent elemnode
in the above referenced list of elemnode exceptions.
Important note about "//text()"
-------------------------------
consider:
...
<p>
I'm
<b>Foo</b>
Bar
</p>
...
reconsider this list of exception elemnodes which we "pruned":
script, noscript, style, i, b, strong, span, a
If you forgot what "//text()" does, it is saying, "select all textnodes"
So after that xpath execution, you might think that we'll end up with
only these textnodes
[
#text("I'm"),
#text("Bar")
]
If you don't think that, then you're good and can skip down a bit.
If you think that that's what we'll end up getting, then consider the
following:
In executing "//text()", your xpath query environment/engine will
gather this list of nodes:
[
#text("I'm"),
#text("Foo"),
#text("Bar")
]
And now let's add just one of the node names from our list of
exceptions (please don't confuse my use of "exceptions" with what
we programmers know as runtime errors and exceptions) to our xpath
query:
//text() -----> //*[not(self::b)]/text()  (note the single slash: child axis)
This produces, to little surprise:
[
#text("I'm"),
#text("Bar")
]
Wouldn't this be counterintuitive? We essentially are cutting one-third
of our textual data.
I unintentionally digressed into explaining the origin of the xpath. Bear
with me. And feel free to skip this, as I think I've explained the
intuition behind this structure enough.
"""
# this may be a possible approach to make this class more generalized
#def __init__(self, *args, **kwargs):
#args -- tuple of anonymous arguments
#kwargs -- dictionary of named arguments
#super(TextNodeSubTree, self).__init__()
#self.parent_path = kwargs.get('parpath')
def __init__(self, parent_elem, parent_path, tnodes):
"""This is a structure that is explained above.
parpath = path to root
tnodes = list of children textnodes
"""
super(TextNodeSubTree, self).__init__()
# subtree's root element (aka wrapping html)
self.__parent_elem = parent_elem
# subtree parent's path
self.__parent_path = parent_path
# list of children textnodes
self.__text_nodes = tnodes
# calculate the feature's values
self.__learn_oneself()
self.clean()
def __learn_oneself(self):
"""calculate cardinality, total and average string length"""
if not self.__parent_path or not self.__text_nodes:
raise Exception("This error occurred because the step constructor\
had insufficient textnodes or it had empty string\
for its parent xpath")
# Iterate through text nodes and sum up text length
# TODO: consider naming this child_count or cardinality
# or branch_cnt
self.tnodes_cnt = len(self.__text_nodes)
# consider naming this total
self.ttl_strlen = sum([len(tnode) for tnode in self.__text_nodes])
# consider naming this average
self.avg_strlen = self.ttl_strlen/self.tnodes_cnt
def get_text(self):
"""Return all joined text in textnodes"""
return "".join(self.__text_nodes)
def get_html(self):
"""Return the html that wraps around the text"""
return self.__parent_elem
def clean(self):
"""clean up newlines"""
# str.strip() returns a new string (it does not mutate), so reassign
self.__text_nodes = [textnode.strip() for textnode in self.__text_nodes]
@property
def parent_path(self):
"""parent_path getter"""
return self.__parent_path
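The child-versus-descendant axis point made in the docstring above can be
verified directly; a minimal demo, assuming lxml is available:

from lxml import etree

root = etree.fromstring("<p>I'm <b>Foo</b> Bar</p>")
print(root.xpath("//text()"))                  # ["I'm ", 'Foo', ' Bar']
# the child axis (/text()), as used in TEXT_FINDER_XPATH, skips text
# sitting inside the excluded <b> element:
print(root.xpath("//*[not(self::b)]/text()"))  # ["I'm ", ' Bar']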
|
rodricios/eatiht
|
eatiht/eatiht_trees.py
|
TextNodeTree.__make_tree
|
python
|
def __make_tree(self):
"""Build a tree using lxml.html.builder and our subtrees"""
# create div with "container" class
div = E.DIV(E.CLASS("container"))
# append header with title
div.append(E.H2(self.__title))
# next, iterate through subtrees appending each tree to div
for subtree in self.__subtrees:
div.append(subtree.get_html())
# Connect div to body
body = E.BODY(div)
# attach body to html
self.__htmltree = E.HTML(
E.HEAD(
E.TITLE(self.__title)
),
body
)
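The E factory above comes from lxml's builder module; a standalone sketch of
the same pattern (element names and text here are invented):

from lxml import etree
from lxml.builder import E

doc = E.html(
    E.head(E.title("Example title")),
    E.body(E.div({"class": "container"},
                 E.h2("Example title"),
                 E.p("extracted text goes here"))))
print(etree.tostring(doc, pretty_print=True).decode())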
|
Build a tree using lxml.html.builder and our subtrees
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L228-L250
| null |
class TextNodeTree(object):
"""collection of textnode subtrees"""
def __init__(self, title, subtrees, hist):
"""This is a structure that is explained above."""
super(TextNodeTree, self).__init__()
self.__title = title
self.__subtrees = subtrees
self.__histogram = hist
self.__content_path = hist[0][0]
self.__htmltree = None
self.__fulltext = ""
@property
def get_subtrees(self):
"""Return all subtrees"""
return self.__subtrees
@property
def histogram(self):
"""Return frequency distribution used to find the best subtree"""
return self.__histogram
@property
def title(self):
"""Return title of website"""
return self.__title
@property
def content_path(self):
"""Return xpath to main content"""
return self.__content_path
def __make_tree(self):
"""Build a tree using lxml.html.builder and our subtrees"""
# create div with "container" class
div = E.DIV(E.CLASS("container"))
# append header with title
div.append(E.H2(self.__title))
# next, iterate through subtrees appending each tree to div
for subtree in self.__subtrees:
div.append(subtree.get_html())
# Connect div to body
body = E.BODY(div)
# attach body to html
self.__htmltree = E.HTML(
E.HEAD(
E.TITLE(self.__title)
),
body
)
def get_html(self):
"""Generates if need be and returns a simpler html document with text"""
if self.__htmltree is not None:
return self.__htmltree
else:
self.__make_tree()
return self.__htmltree
def get_html_string(self):
"""Generates if need be and returns a simpler html string with
extracted text"""
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree)
def get_text(self):
"""Return all joined text from each subtree"""
if self.__fulltext:
return self.__fulltext
else:
self.__fulltext = "\n\n".join(text.get_text()
for text in self.__subtrees)
return self.__fulltext
# TODO: I consider this a "prototype" to the template generator
# Clearly, bootstrap shouldn't be the only styling possible
def bootstrapify(self):
"""Add bootstrap cdn to headers of html"""
if self.__htmltree is None:
#raise Exception("HtmlTree has not been made yet")
self.__make_tree()
# add bootstrap cdn to head
self.__htmltree.find('head').append(
E.LINK(rel="stylesheet",
href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css",
type="text/css"))
# center images
for img_parent in self.__htmltree.xpath("//img/.."):
# the space before the class to insert is CRITICAL!
img_parent.attrib["class"] += " text-center"
# make images responsive
for img in self.__htmltree.xpath("//img"):
# the space before the class to insert is CRITICAL!
img.attrib["class"] += " img-responsive"
|
rodricios/eatiht
|
eatiht/eatiht_trees.py
|
TextNodeTree.get_html
|
python
|
def get_html(self):
"""Generates if need be and returns a simpler html document with text"""
if self.__htmltree is not None:
return self.__htmltree
else:
self.__make_tree()
return self.__htmltree
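The build-once-and-cache idiom above can also be written with
functools.cached_property on Python 3.8+; a sketch for comparison only, not
the class's actual implementation:

from functools import cached_property

class LazyTree:
    @cached_property
    def htmltree(self):
        # built on first access, then memoized on the instance
        return self._make_tree()

    def _make_tree(self):
        return "<html>...</html>"  # stand-in for the real builder

print(LazyTree().htmltree)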
|
Generates if need be and returns a simpler html document with text
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L252-L258
|
[
"def __make_tree(self):\n \"\"\"Build a tree using lxml.html.builder and our subtrees\"\"\"\n\n # create div with \"container\" class\n div = E.DIV(E.CLASS(\"container\"))\n\n # append header with title\n div.append(E.H2(self.__title))\n\n # next, iterate through subtrees appending each tree to div\n for subtree in self.__subtrees:\n div.append(subtree.get_html())\n\n # Connect div to body\n body = E.BODY(div)\n\n # attach body to html\n self.__htmltree = E.HTML(\n E.HEAD(\n E.TITLE(self.__title)\n ),\n body\n )\n"
] |
class TextNodeTree(object):
"""collection of textnode subtrees"""
def __init__(self, title, subtrees, hist):
"""This is a structure that is explained above."""
super(TextNodeTree, self).__init__()
self.__title = title
self.__subtrees = subtrees
self.__histogram = hist
self.__content_path = hist[0][0]
self.__htmltree = None
self.__fulltext = ""
@property
def get_subtrees(self):
"""Return all subtrees"""
return self.__subtrees
@property
def histogram(self):
"""Return frequency distribution used to find the best subtree"""
return self.__histogram
@property
def title(self):
"""Return title of website"""
return self.__title
@property
def content_path(self):
"""Return xpath to main content"""
return self.__content_path
def __make_tree(self):
"""Build a tree using lxml.html.builder and our subtrees"""
# create div with "container" class
div = E.DIV(E.CLASS("container"))
# append header with title
div.append(E.H2(self.__title))
# next, iterate through subtrees appending each tree to div
for subtree in self.__subtrees:
div.append(subtree.get_html())
# Connect div to body
body = E.BODY(div)
# attach body to html
self.__htmltree = E.HTML(
E.HEAD(
E.TITLE(self.__title)
),
body
)
def get_html(self):
"""Generates if need be and returns a simpler html document with text"""
if self.__htmltree is not None:
return self.__htmltree
else:
self.__make_tree()
return self.__htmltree
def get_html_string(self):
"""Generates if need be and returns a simpler html string with
extracted text"""
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree)
def get_text(self):
"""Return all joined text from each subtree"""
if self.__fulltext:
return self.__fulltext
else:
self.__fulltext = "\n\n".join(text.get_text()
for text in self.__subtrees)
return self.__fulltext
# TODO: I consider this a "prototype" to the template generator
# Clearly, bootstrap shouldn't be the only styling possible
def bootstrapify(self):
"""Add bootstrap cdn to headers of html"""
if self.__htmltree is None:
#raise Exception("HtmlTree has not been made yet")
self.__make_tree()
# add bootstrap cdn to head
self.__htmltree.find('head').append(
E.LINK(rel="stylesheet",
href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css",
type="text/css"))
# center images
for img_parent in self.__htmltree.xpath("//img/.."):
# the space before the class to insert is CRITICAL!
img_parent.attrib["class"] += " text-center"
# make images responsive
for img in self.__htmltree.xpath("//img"):
# the space before the class to insert is CRITICAL!
img.attrib["class"] += " img-responsive"
|
rodricios/eatiht
|
eatiht/eatiht_trees.py
|
TextNodeTree.get_html_string
|
python
|
def get_html_string(self):
"""Generates if need be and returns a simpler html string with
extracted text"""
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree)
|
Generates if need be and returns a simpler html string with
extracted text
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L260-L267
|
[
"def __make_tree(self):\n \"\"\"Build a tree using lxml.html.builder and our subtrees\"\"\"\n\n # create div with \"container\" class\n div = E.DIV(E.CLASS(\"container\"))\n\n # append header with title\n div.append(E.H2(self.__title))\n\n # next, iterate through subtrees appending each tree to div\n for subtree in self.__subtrees:\n div.append(subtree.get_html())\n\n # Connect div to body\n body = E.BODY(div)\n\n # attach body to html\n self.__htmltree = E.HTML(\n E.HEAD(\n E.TITLE(self.__title)\n ),\n body\n )\n"
] |
class TextNodeTree(object):
"""collection of textnode subtrees"""
def __init__(self, title, subtrees, hist):
"""This is a structure that is explained above."""
super(TextNodeTree, self).__init__()
self.__title = title
self.__subtrees = subtrees
self.__histogram = hist
self.__content_path = hist[0][0]
self.__htmltree = None
self.__fulltext = ""
@property
def get_subtrees(self):
"""Return all subtrees"""
return self.__subtrees
@property
def histogram(self):
"""Return frequency distribution used to find the best subtree"""
return self.__histogram
@property
def title(self):
"""Return title of website"""
return self.__title
@property
def content_path(self):
"""Return xpath to main content"""
return self.__content_path
def __make_tree(self):
"""Build a tree using lxml.html.builder and our subtrees"""
# create div with "container" class
div = E.DIV(E.CLASS("container"))
# append header with title
div.append(E.H2(self.__title))
# next, iterate through subtrees appending each tree to div
for subtree in self.__subtrees:
div.append(subtree.get_html())
# Connect div to body
body = E.BODY(div)
# attach body to html
self.__htmltree = E.HTML(
E.HEAD(
E.TITLE(self.__title)
),
body
)
def get_html(self):
"""Generates if need be and returns a simpler html document with text"""
if self.__htmltree is not None:
return self.__htmltree
else:
self.__make_tree()
return self.__htmltree
def get_html_string(self):
"""Generates if need be and returns a simpler html string with
extracted text"""
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree)
def get_text(self):
"""Return all joined text from each subtree"""
if self.__fulltext:
return self.__fulltext
else:
self.__fulltext = "\n\n".join(text.get_text()
for text in self.__subtrees)
return self.__fulltext
# TODO: I consider this a "prototype" to the template generator
# Clearly, bootstrap shouldn't be the only styling possible
def bootstrapify(self):
"""Add bootstrap cdn to headers of html"""
if self.__htmltree is None:
#raise Exception("HtmlTree has not been made yet")
self.__make_tree()
# add bootstrap cdn to head
self.__htmltree.find('head').append(
E.LINK(rel="stylesheet",
href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css",
type="text/css"))
# center images
for img_parent in self.__htmltree.xpath("//img/.."):
# the space before the class to insert is CRITICAL!
img_parent.attrib["class"] += " text-center"
# make images responsive
for img in self.__htmltree.xpath("//img"):
# the space before the class to insert is CRITICAL!
img.attrib["class"] += " img-responsive"
|
rodricios/eatiht
|
eatiht/eatiht_trees.py
|
TextNodeTree.get_text
|
python
|
def get_text(self):
"""Return all joined text from each subtree"""
if self.__fulltext:
return self.__fulltext
else:
self.__fulltext = "\n\n".join(text.get_text()
for text in self.__subtrees)
return self.__fulltext
|
Return all joined text from each subtree
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L269-L276
| null |
class TextNodeTree(object):
"""collection of textnode subtrees"""
def __init__(self, title, subtrees, hist):
"""This is a structure that is explained above."""
super(TextNodeTree, self).__init__()
self.__title = title
self.__subtrees = subtrees
self.__histogram = hist
self.__content_path = hist[0][0]
self.__htmltree = None
self.__fulltext = ""
@property
def get_subtrees(self):
"""Return all subtrees"""
return self.__subtrees
@property
def histogram(self):
"""Return frequency distribution used to find the best subtree"""
return self.__histogram
@property
def title(self):
"""Return title of website"""
return self.__title
@property
def content_path(self):
"""Return xpath to main content"""
return self.__content_path
def __make_tree(self):
"""Build a tree using lxml.html.builder and our subtrees"""
# create div with "container" class
div = E.DIV(E.CLASS("container"))
# append header with title
div.append(E.H2(self.__title))
# next, iterate through subtrees appending each tree to div
for subtree in self.__subtrees:
div.append(subtree.get_html())
# Connect div to body
body = E.BODY(div)
# attach body to html
self.__htmltree = E.HTML(
E.HEAD(
E.TITLE(self.__title)
),
body
)
def get_html(self):
"""Generates if need be and returns a simpler html document with text"""
if self.__htmltree is not None:
return self.__htmltree
else:
self.__make_tree()
return self.__htmltree
def get_html_string(self):
"""Generates if need be and returns a simpler html string with
extracted text"""
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree)
def get_text(self):
"""Return all joined text from each subtree"""
if self.__fulltext:
return self.__fulltext
else:
self.__fulltext = "\n\n".join(text.get_text()
for text in self.__subtrees)
return self.__fulltext
# TODO: I consider this a "prototype" to the template generator
# Clearly, bootstrap shouldn't be the only styling possible
def bootstrapify(self):
"""Add bootstrap cdn to headers of html"""
if self.__htmltree is None:
#raise Exception("HtmlTree has not been made yet")
self.__make_tree()
# add bootstrap cdn to head
self.__htmltree.find('head').append(
E.LINK(rel="stylesheet",
href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css",
type="text/css"))
# center images
for img_parent in self.__htmltree.xpath("//img/.."):
# the space before the class to insert is CRITICAL!
img_parent.attrib["class"] += " text-center"
# make images responsive
for img in self.__htmltree.xpath("//img"):
# the space before the class to insert is CRITICAL!
img.attrib["class"] += " img-responsive"
|
rodricios/eatiht
|
eatiht/eatiht_trees.py
|
TextNodeTree.bootstrapify
|
python
|
def bootstrapify(self):
"""Add bootstrap cdn to headers of html"""
if self.__htmltree is None:
#raise Exception("HtmlTree has not been made yet")
self.__make_tree()
# add bootstrap cdn to head
self.__htmltree.find('head').append(
E.LINK(rel="stylesheet",
href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css",
type="text/css"))
# center images
for img_parent in self.__htmltree.xpath("//img/.."):
# the space before the class to insert is CRITICAL!
img_parent.attrib["class"] += " text-center"
# make images responsive
for img in self.__htmltree.xpath("//img"):
# the space before the class to insert is CRITICAL!
img.attrib["class"] += " img-responsive"
|
Add bootstrap cdn link to the html head
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht_trees.py#L280-L300
|
[
"def __make_tree(self):\n \"\"\"Build a tree using lxml.html.builder and our subtrees\"\"\"\n\n # create div with \"container\" class\n div = E.DIV(E.CLASS(\"container\"))\n\n # append header with title\n div.append(E.H2(self.__title))\n\n # next, iterate through subtrees appending each tree to div\n for subtree in self.__subtrees:\n div.append(subtree.get_html())\n\n # Connect div to body\n body = E.BODY(div)\n\n # attach body to html\n self.__htmltree = E.HTML(\n E.HEAD(\n E.TITLE(self.__title)\n ),\n body\n )\n"
] |
class TextNodeTree(object):
"""collection of textnode subtrees"""
def __init__(self, title, subtrees, hist):
"""This is a structure that is explained above."""
super(TextNodeTree, self).__init__()
self.__title = title
self.__subtrees = subtrees
self.__histogram = hist
self.__content_path = hist[0][0]
self.__htmltree = None
self.__fulltext = ""
@property
def get_subtrees(self):
"""Return all subtrees"""
return self.__subtrees
@property
def histogram(self):
"""Return frequency distribution used to find the best subtree"""
return self.__histogram
@property
def title(self):
"""Return title of website"""
return self.__title
@property
def content_path(self):
"""Return xpath to main content"""
return self.__content_path
def __make_tree(self):
"""Build a tree using lxml.html.builder and our subtrees"""
# create div with "container" class
div = E.DIV(E.CLASS("container"))
# append header with title
div.append(E.H2(self.__title))
# next, iterate through subtrees appending each tree to div
for subtree in self.__subtrees:
div.append(subtree.get_html())
# Connect div to body
body = E.BODY(div)
# attach body to html
self.__htmltree = E.HTML(
E.HEAD(
E.TITLE(self.__title)
),
body
)
def get_html(self):
"""Generates if need be and returns a simpler html document with text"""
if self.__htmltree is not None:
return self.__htmltree
else:
self.__make_tree()
return self.__htmltree
def get_html_string(self):
"""Generates if need be and returns a simpler html string with
extracted text"""
if self.__htmltree is not None:
return htmltostring(self.__htmltree)
else:
self.__make_tree()
return htmltostring(self.__htmltree)
def get_text(self):
"""Return all joined text from each subtree"""
if self.__fulltext:
return self.__fulltext
else:
self.__fulltext = "\n\n".join(text.get_text()
for text in self.__subtrees)
return self.__fulltext
# TODO: I consider this a "prototype" to the template generator
# Clearly, bootstrap shouldn't be the only styling possible
def bootstrapify(self):
"""Add bootstrap cdn to headers of html"""
if self.__htmltree is None:
#raise Exception("HtmlTree has not been made yet")
self.__make_tree()
# add bootstrap cdn to head
self.__htmltree.find('head').append(
E.LINK(rel="stylesheet",
href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css",
type="text/css"))
# center images
for img_parent in self.__htmltree.xpath("//img/.."):
# the space before the class to insert is CRITICAL!
img_parent.attrib["class"] += " text-center"
# make images responsive
for img in self.__htmltree.xpath("//img"):
# the space before the class to insert is CRITICAL!
img.attrib["class"] += " img-responsive"
|
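bootstrapify builds on lxml.html.builder element makers. A short sketch, assuming a freshly built document rather than the TextNodeTree output, of injecting the same stylesheet link:

from lxml.html import builder as E
from lxml.html import tostring

doc = E.HTML(E.HEAD(E.TITLE("demo")), E.BODY(E.P("hello")))
# Append the CDN <link> to the existing <head>, as bootstrapify does.
doc.find('head').append(
    E.LINK(rel="stylesheet",
           href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.1/css/bootstrap.min.css",
           type="text/css"))
print(tostring(doc, pretty_print=True).decode())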
rodricios/eatiht
|
eatiht/eatiht.py
|
get_sentence_xpath_tuples
|
python
|
def get_sentence_xpath_tuples(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""
Given a url and xpath, this function will download, parse, then
iterate through queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
"""
parsed_html = get_html_tree(filename_url_or_filelike)
try:
xpath_finder = parsed_html.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = parsed_html.getroottree().getpath
nodes_with_text = parsed_html.xpath(xpath_to_text)
sent_xpath_pairs = [
# hard-code paragraph breaks (there has to be a better way)
('\n\n' + s, xpath_finder(n)) if e == 0
else (s, xpath_finder(n))
for n in nodes_with_text
for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(
BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))
if s.endswith(tuple(SENTENCE_ENDING))
]
return sent_xpath_pairs
|
Given a url and xpath, this function will download, parse, then
iterate through queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht.py#L189-L216
|
[
"def get_html_tree(filename_url_or_filelike):\n \"\"\"From some file path, input stream, or URL, construct and return\n an HTML tree.\n \"\"\"\n try:\n handler = (\n HTTPSHandler\n if filename_url_or_filelike.lower().startswith('https')\n else HTTPHandler\n )\n cj = CookieJar()\n opener = build_opener(handler)\n opener.add_handler(HTTPCookieProcessor(cj))\n\n resp = opener.open(filename_url_or_filelike)\n except(AttributeError):\n content = filename_url_or_filelike.read()\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n except(ValueError):\n content = filename_url_or_filelike\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n\n try:\n content = resp.read()\n finally:\n resp.close()\n\n encoding = chardet.detect(content)['encoding']\n\n parsed_html = html.parse(BytesIO(content),\n html.HTMLParser(encoding=encoding,\n remove_blank_text=True))\n\n return parsed_html\n"
] |
"""
eatiht
Extract Article Text In HyperText documents
written by Rodrigo Palacios
**tl;dr**
(revised on 12/20/2014)
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm, dammit!:
Using a clever xpath expression that targets the immediate parents of
text nodes of a certain length N, one can get a list of parent nodes
which have, what we can consider as "ideal," text nodes (nodes that
have sentences).
For each text node, we "partition" the text node so that instead of the
parent node having the original text node as its lone child, the parent
now has P children; the partitioning method used is a REGEX sentence
split.
Finally, now using the *parents* of the above mentioned parent
nodes as our sample, we create a frequency distribution measuring
the number of text node descendants of each parent. In other words,
We can find the xpath with the most number of text node descendants.
This output has shown to lead us to the main article in a webpage.
**A slightly more formal explanation**
(Needs revision as of 12/20/2014)
A reminder: with the help of one of the most fundamental statistical
tools - the frequency distribution - one can easily pick out the
element appearing most frequently in a list of elements.
Now, consider some arbitrary webpage, comprising stylistic/structural
nodes (div, p, etc.) and "text" nodes (html leafnodes that contain
onscreen text). For every node, there exists at least one XPath that
can describe a leaf node's location within the html tree. If one
assumes some arbitry "sentence length" N and queries for text nodes
that adhere to that constraint (ie. string-length > N), a list of only
text nodes with string length greater than N is returned.
Using those newly-acquired list of nodes, two things must happen for
this algorithm to work properly:
1. Split the text within each text node into sentences (current
implementation relies on REGEX sentence-splitting patterns).
2. For each new pseudo-node that is created upon sentence-split, attach
*not* the xpath that leads to the original text node, but the xpath of
the *parent* node that leads to the original text node.
The last two steps will essentially create a list of (sentence, xpath)
tuples. After this, one can build a frequency distribution across the
xpaths.
Finally, the most frequent element in the freq. distribution (aka
"argmax") should* be the parent node leading to the structural html-element
that "divides" or "encompasses" the main text body.
Please refer to this project's github page for more information:
https://github.com/im-rodrigo/eatiht
Contact the author:
twitter - @mi_ogirdor
email - rodrigopala91@gmail.com
github - https://github.com/im-rodrigo
"""
import re
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
# This xpath expression effectively queries html text
# nodes that have a string-length greater than 20
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::style or \
self::i or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# REGEX patterns for catching bracketed numbers - as seen in wiki articles -
# and sentence splitters
BRACKET_PATTERN = re.compile(r'(\[\d*\])')
# http://stackoverflow.com/questions/8465335/a-regex-for-extracting-sentence-from-a-paragraph-in-python
SENTENCE_TOKEN_PATTERN = re.compile(r"""
# Split sentences on whitespace between them.
(?: # Group for two positive lookbehinds.
(?<=[.!?]) # Either an end of sentence punct,
| (?<=[.!?]['"]) # or end of sentence punct and quote.
) # End group of two positive lookbehinds.
(?<! Mr\. ) # Don't end sentence on "Mr."
(?<! Mrs\. ) # Don't end sentence on "Mrs."
(?<! Jr\. ) # Don't end sentence on "Jr."
(?<! Dr\. ) # Don't end sentence on "Dr."
(?<! Prof\. ) # Don't end sentence on "Prof."
(?<! Sr\. ) # Don't end sentence on "Sr."
\s+ # Split on whitespace between sentences.
""", re.IGNORECASE | re.VERBOSE)
SENTENCE_ENDING = ['.', '"', '?', '!', "'"]
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
def get_xpath_frequencydistribution(paths):
"""
Build and return a frequency distribution over xpath occurrences.
"""
# "html/body/div/div/text" -> [ "html/body/div/div", "text" ]
splitpaths = [p.rsplit('/', 1) for p in paths]
# get list of "parentpaths"
parentpaths = [p[0] for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
def get_sentence_xpath_tuples(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""
Given a url and xpath, this function will download, parse, then
iterate through queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
"""
parsed_html = get_html_tree(filename_url_or_filelike)
try:
xpath_finder = parsed_html.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = parsed_html.getroottree().getpath
nodes_with_text = parsed_html.xpath(xpath_to_text)
sent_xpath_pairs = [
# hard-code paragraph breaks (there has to be a better way)
('\n\n' + s, xpath_finder(n)) if e == 0
else (s, xpath_finder(n))
for n in nodes_with_text
for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(
BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))
if s.endswith(tuple(SENTENCE_ENDING))
]
return sent_xpath_pairs
def extract(url_or_htmlstring, xpath_to_text=TEXT_FINDER_XPATH):
"""
Wrapper function for extracting the main article from an html document.
A crappy flowchart/state-diagram:
start: url[,xpath] -> xpaths of text-nodes -> frequency distribution
-> argmax( freq. dist. ) = likely xpath leading to article's content
"""
sent_xpath_pairs = get_sentence_xpath_tuples(url_or_htmlstring, xpath_to_text)
hist = get_xpath_frequencydistribution(
[x for (s, x) in sent_xpath_pairs])
max_path = hist[0]
article_text = ' '.join([s for (s, x) in sent_xpath_pairs
if max_path[0] in x])
# starting from index 2 because of the two extra newlines in front
return article_text[2:]
|
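The sentence-splitting step above hinges on the lookbehind structure of SENTENCE_TOKEN_PATTERN. A reduced sketch (only two honorific exclusions, sample text made up) showing the split behaviour on a plain string:

import re

SPLITTER = re.compile(r"""
    (?:(?<=[.!?])|(?<=[.!?]['"]))   # position just after sentence-ending punct
    (?<!\ Mr\.)(?<!\ Dr\.)          # ...but not after these honorifics
    \s+                             # consume the whitespace between sentences
""", re.VERBOSE)

text = "He met Dr. Smith. Mr. Jones waved! They talked."
print(SPLITTER.split(text))
# ['He met Dr. Smith.', 'Mr. Jones waved!', 'They talked.']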
rodricios/eatiht
|
eatiht/eatiht.py
|
extract
|
python
|
def extract(url_or_htmlstring, xpath_to_text=TEXT_FINDER_XPATH):
"""
Wrapper function for extracting the main article from an html document.
A crappy flowchart/state-diagram:
start: url[,xpath] -> xpaths of text-nodes -> frequency distribution
-> argmax( freq. dist. ) = likely xpath leading to article's content
"""
sent_xpath_pairs = get_sentence_xpath_tuples(url_or_htmlstring, xpath_to_text)
hist = get_xpath_frequencydistribution(
[x for (s, x) in sent_xpath_pairs])
max_path = hist[0]
article_text = ' '.join([s for (s, x) in sent_xpath_pairs
if max_path[0] in x])
# starting from index 2 because of the two extra newlines in front
return article_text[2:]
|
Wrapper function for extracting the main article from an html document.
A crappy flowchart/state-diagram:
start: url[,xpath] -> xpaths of text-nodes -> frequency distribution
-> argmax( freq. dist. ) = likely xpath leading to article's content
|
train
|
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/eatiht.py#L219-L238
|
[
"def get_sentence_xpath_tuples(filename_url_or_filelike,\n xpath_to_text=TEXT_FINDER_XPATH):\n \"\"\"\n Given a url and xpath, this function will download, parse, then\n iterate though queried text-nodes. From the resulting text-nodes,\n extract a list of (text, exact-xpath) tuples.\n \"\"\"\n\n parsed_html = get_html_tree(filename_url_or_filelike)\n\n try:\n xpath_finder = parsed_html.getroot().getroottree().getpath\n except(AttributeError):\n xpath_finder = parsed_html.getroottree().getpath\n\n nodes_with_text = parsed_html.xpath(xpath_to_text)\n\n sent_xpath_pairs = [\n # hard-code paragraph breaks (there has to be a better way)\n ('\\n\\n' + s, xpath_finder(n)) if e == 0\n else (s, xpath_finder(n))\n for n in nodes_with_text\n for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(\n BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))\n if s.endswith(tuple(SENTENCE_ENDING))\n ]\n\n return sent_xpath_pairs\n",
"def get_xpath_frequencydistribution(paths):\n \"\"\"\n Build and return a frequency distribution over xpath occurrences.\n \"\"\"\n # \"html/body/div/div/text\" -> [ \"html/body/div/div\", \"text\" ]\n splitpaths = [p.rsplit('/', 1) for p in paths]\n\n # get list of \"parentpaths\"\n parentpaths = [p[0] for p in splitpaths]\n\n # build frequency distribution\n parentpaths_counter = Counter(parentpaths)\n return parentpaths_counter.most_common()\n"
] |
"""
eatiht
Extract Article Text In HyperText documents
written by Rodrigo Palacios
**tl;dr**
(revised on 12/20/2014)
Note: for those unfamiliar with xpaths, think of them as file/folder
paths, where each "file/folder" is really just some HTML element.
Algorithm, dammit!:
Using a clever xpath expression that targets the immediate parents of
text nodes of a certain length N, one can get a list of parent nodes
which have, what we can consider as "ideal," text nodes (nodes that
have sentences).
For each text node, we "partition" the text node so that instead of the
parent node having the original text node as its lone child, the parent
now has P children; the partitioning method used is a REGEX sentence
split.
Finally, now using the *parents* of the above mentioned parent
nodes as our sample, we create a frequency distribution measuring
the number of text node descendants of each parent. In other words,
We can find the xpath with the most number of text node descendants.
This output has shown to lead us to the main article in a webpage.
**A slightly more formal explanation**
(Needs revision as of 12/20/2014)
A reminder: with the help of one of the most fundamental statistical
tools - the frequency distribution - one can easily pick out the
element appearing most frequently in a list of elements.
Now, consider some arbitrary webpage, comprising stylistic/structural
nodes (div, p, etc.) and "text" nodes (html leafnodes that contain
onscreen text). For every node, there exists at least one XPath that
can describe a leaf node's location within the html tree. If one
assumes some arbitrary "sentence length" N and queries for text nodes
that adhere to that constraint (i.e. string-length > N), a list of only
text nodes with string length greater than N is returned.
Using those newly-acquired list of nodes, two things must happen for
this algorithm to work properly:
1. Split the text within each text node into sentences (current
implementation relies on REGEX sentence-splitting patterns).
2. For each new pseudo-node that is created upon sentence-split, attach
*not* the xpath that leads to the original text node, but the xpath of
the *parent* node that leads to the original text node.
The last two steps will essentially create a list of (sentence, xpath)
tuples. After this, one can build a frequency distribution across the
xpaths.
Finally, the most frequent element in the freq. distribution (aka
"argmax") should* be the parent node leading to the structural html-element
that "divides" or "encompasses" the main text body.
Please refer to this project's github page for more information:
https://github.com/im-rodrigo/eatiht
Contact the author:
twitter - @mi_ogirdor
email - rodrigopala91@gmail.com
github - https://github.com/im-rodrigo
"""
import re
import chardet
from collections import Counter
try:
from cStringIO import StringIO as BytesIO
from urllib2 import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from cookielib import CookieJar
except ImportError:
from io import BytesIO
from urllib.request import HTTPHandler, HTTPSHandler, build_opener, HTTPCookieProcessor
from http.cookiejar import CookieJar
from lxml import html
# This xpath expression effectively queries html text
# nodes that have a string-length greater than 20
TEXT_FINDER_XPATH = '//body\
//*[not(\
self::script or \
self::style or \
self::i or \
self::b or \
self::strong or \
self::span or \
self::a)] \
/text()[string-length(normalize-space()) > 20]/..'
# REGEX patterns for catching bracketed numbers - as seen in wiki articles -
# and sentence splitters
BRACKET_PATTERN = re.compile(r'(\[\d*\])')
# http://stackoverflow.com/questions/8465335/a-regex-for-extracting-sentence-from-a-paragraph-in-python
SENTENCE_TOKEN_PATTERN = re.compile(r"""
# Split sentences on whitespace between them.
(?: # Group for two positive lookbehinds.
(?<=[.!?]) # Either an end of sentence punct,
| (?<=[.!?]['"]) # or end of sentence punct and quote.
) # End group of two positive lookbehinds.
(?<! Mr\. ) # Don't end sentence on "Mr."
(?<! Mrs\. ) # Don't end sentence on "Mrs."
(?<! Jr\. ) # Don't end sentence on "Jr."
(?<! Dr\. ) # Don't end sentence on "Dr."
(?<! Prof\. ) # Don't end sentence on "Prof."
(?<! Sr\. ) # Don't end sentence on "Sr."
\s+ # Split on whitespace between sentences.
""", re.IGNORECASE | re.VERBOSE)
SENTENCE_ENDING = ['.', '"', '?', '!', "'"]
def get_html_tree(filename_url_or_filelike):
"""From some file path, input stream, or URL, construct and return
an HTML tree.
"""
try:
handler = (
HTTPSHandler
if filename_url_or_filelike.lower().startswith('https')
else HTTPHandler
)
cj = CookieJar()
opener = build_opener(handler)
opener.add_handler(HTTPCookieProcessor(cj))
resp = opener.open(filename_url_or_filelike)
except(AttributeError):
content = filename_url_or_filelike.read()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
except(ValueError):
content = filename_url_or_filelike
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
try:
content = resp.read()
finally:
resp.close()
encoding = chardet.detect(content)['encoding']
parsed_html = html.parse(BytesIO(content),
html.HTMLParser(encoding=encoding,
remove_blank_text=True))
return parsed_html
def get_xpath_frequencydistribution(paths):
"""
Build and return a frequency distribution over xpath occurrences.
"""
# "html/body/div/div/text" -> [ "html/body/div/div", "text" ]
splitpaths = [p.rsplit('/', 1) for p in paths]
# get list of "parentpaths"
parentpaths = [p[0] for p in splitpaths]
# build frequency distribution
parentpaths_counter = Counter(parentpaths)
return parentpaths_counter.most_common()
def get_sentence_xpath_tuples(filename_url_or_filelike,
xpath_to_text=TEXT_FINDER_XPATH):
"""
Given a url and xpath, this function will download, parse, then
iterate through queried text-nodes. From the resulting text-nodes,
extract a list of (text, exact-xpath) tuples.
"""
parsed_html = get_html_tree(filename_url_or_filelike)
try:
xpath_finder = parsed_html.getroot().getroottree().getpath
except(AttributeError):
xpath_finder = parsed_html.getroottree().getpath
nodes_with_text = parsed_html.xpath(xpath_to_text)
sent_xpath_pairs = [
# hard-code paragraph breaks (there has to be a better way)
('\n\n' + s, xpath_finder(n)) if e == 0
else (s, xpath_finder(n))
for n in nodes_with_text
for e, s in enumerate(SENTENCE_TOKEN_PATTERN.split(
BRACKET_PATTERN.sub('', ''.join(n.xpath('.//text()')))))
if s.endswith(tuple(SENTENCE_ENDING))
]
return sent_xpath_pairs
def extract(url_or_htmlstring, xpath_to_text=TEXT_FINDER_XPATH):
"""
Wrapper function for extracting the main article from an html document.
A crappy flowchart/state-diagram:
start: url[,xpath] -> xpaths of text-nodes -> frequency distribution
-> argmax( freq. dist. ) = likely xpath leading to article's content
"""
sent_xpath_pairs = get_sentence_xpath_tuples(url_or_htmlstring, xpath_to_text)
hist = get_xpath_frequencydistribution(
[x for (s, x) in sent_xpath_pairs])
max_path = hist[0]
article_text = ' '.join([s for (s, x) in sent_xpath_pairs
if max_path[0] in x])
# starting from index 2 because of the two extra newlines in front
return article_text[2:]
|
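End-to-end, the module is meant to be driven through extract(). A hedged usage sketch, assuming the eatiht package re-exports extract at the top level and using a placeholder URL:

import eatiht

url = "http://example.com/some-article.html"   # placeholder; any article page
print(eatiht.extract(url))

get_sentence_xpath_tuples and get_xpath_frequencydistribution run underneath, so no xpath argument is needed for typical pages.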
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.sign
|
python
|
def sign(self, node):
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
|
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L29-L44
|
[
"def fill_key_info(self, key_info, signature_method):\n \"\"\"\n Fills the KeyInfo node\n :param key_info: KeyInfo node \n :type key_info: lxml.etree.Element\n :param signature_method: Signature node to use\n :type signature_method: str\n :return: None\n \"\"\"\n x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)\n if x509_data is not None:\n self.fill_x509_data(x509_data)\n key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)\n if key_name is not None and self.key_name is not None:\n key_name.text = self.key_name\n key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)\n if key_value is not None:\n key_value.text = '\\n'\n signature = constants.TransformUsageSignatureMethod[\n signature_method\n ]\n key = self.public_key\n if self.public_key is None:\n key = self.private_key.public_key()\n if not isinstance(\n key, signature['method'].public_key_class\n ):\n raise Exception('Key not compatible with signature method')\n signature['method'].key_value(key_value, key)\n",
"def fill_signed_info(self, signed_info):\n \"\"\"\n Fills the SignedInfo node\n :param signed_info: SignedInfo node\n :type signed_info: lxml.etree.Element\n :return: None\n \"\"\"\n for reference in signed_info.findall(\n 'ds:Reference', namespaces=constants.NS_MAP\n ):\n self.calculate_reference(reference, True)\n",
"def calculate_signature(self, node, sign=True):\n \"\"\"\n Calculate or verifies the signature\n :param node: Signature node\n :type node: lxml.etree.Element\n :param sign: It checks if it must calculate or verify\n :type sign: bool\n :return: None\n \"\"\"\n signed_info_xml = node.find('ds:SignedInfo',\n namespaces=constants.NS_MAP)\n canonicalization_method = signed_info_xml.find(\n 'ds:CanonicalizationMethod', namespaces=constants.NS_MAP\n ).get('Algorithm')\n signature_method = signed_info_xml.find(\n 'ds:SignatureMethod', namespaces=constants.NS_MAP\n ).get('Algorithm')\n if signature_method not in constants.TransformUsageSignatureMethod:\n raise Exception('Method ' + signature_method + ' not accepted')\n signature = constants.TransformUsageSignatureMethod[signature_method]\n signed_info = self.canonicalization(\n canonicalization_method, signed_info_xml\n )\n if not sign:\n signature_value = node.find('ds:SignatureValue',\n namespaces=constants.NS_MAP).text\n public_key = signature['method'].get_public_key(node, self)\n signature['method'].verify(\n signature_value,\n signed_info,\n public_key,\n signature['digest']\n )\n else:\n node.find(\n 'ds:SignatureValue', namespaces=constants.NS_MAP\n ).text = b64_print(base64.b64encode(\n signature['method'].sign(\n signed_info,\n self.private_key,\n signature['digest']\n )\n ))\n"
] |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: object to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
It returns the node of the specified URI
:param uri: URI of the referenced element
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI in a String
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be read')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute and store the digest, False to verify it
:type sign: bool
:return: None
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: True to calculate the signature, False to verify it
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
This function fills the context public_key, private_key and x509 from
a PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
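Putting sign() together with load_pkcs12(): a sketch of the implied signing flow. The file names, passphrase, and the presence of a ds:Signature template in the input document are all assumptions, not taken from the library's docs.

from lxml import etree
from OpenSSL import crypto
import xmlsig

# Hypothetical input: an XML file that already contains a ds:Signature
# template (SignedInfo, SignatureValue, KeyInfo placeholders).
tree = etree.parse("document_with_signature_template.xml")
signature_node = tree.getroot().find(
    ".//{http://www.w3.org/2000/09/xmldsig#}Signature")

ctx = xmlsig.SignatureContext()
with open("keystore.p12", "rb") as f:           # hypothetical PKCS12 keystore
    ctx.load_pkcs12(crypto.load_pkcs12(f.read(), b"secret"))
ctx.sign(signature_node)       # fills KeyInfo, reference digests, SignatureValue
ctx.verify(signature_node)     # round-trip check; raises on failure
tree.write("document_signed.xml")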
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.fill_key_info
|
python
|
def fill_key_info(self, key_info, signature_method):
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
|
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L46-L74
|
[
"def fill_x509_data(self, x509_data):\n \"\"\"\n Fills the X509Data Node\n :param x509_data: X509Data Node\n :type x509_data: lxml.etree.Element\n :return: None\n \"\"\"\n x509_issuer_serial = x509_data.find(\n 'ds:X509IssuerSerial', namespaces=constants.NS_MAP\n )\n if x509_issuer_serial is not None:\n self.fill_x509_issuer_name(x509_issuer_serial)\n\n x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)\n if x509_crl is not None and self.crl is not None:\n x509_data.text = base64.b64encode(\n self.crl.public_bytes(serialization.Encoding.DER)\n )\n x509_subject = x509_data.find(\n 'ds:X509SubjectName', namespaces=constants.NS_MAP\n )\n if x509_subject is not None:\n x509_subject.text = get_rdns_name(self.x509.subject.rdns)\n x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)\n if x509_ski is not None:\n x509_ski.text = base64.b64encode(\n self.x509.extensions.get_extension_for_oid(\n ExtensionOID.SUBJECT_KEY_IDENTIFIER\n ).value.digest)\n x509_certificate = x509_data.find(\n 'ds:X509Certificate', namespaces=constants.NS_MAP\n )\n if x509_certificate is not None:\n s = base64.b64encode(\n self.x509.public_bytes(encoding=serialization.Encoding.DER)\n )\n x509_certificate.text = b64_print(s)\n"
] |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: object to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
It returns the node of the specified URI
:param uri: URI of the referenced element
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI in a String
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be read')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute and store the digest, False to verify it
:type sign: bool
:return: None
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: True to calculate the signature, False to verify it
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
This function fills the context public_key, private_key and x509 from
a PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
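The digest() method above reduces to hashlib plus base64 once the algorithm URI has been mapped to a hash name. A standalone sketch of that step (the "sha256" name is an example, not read from TransformUsageDigestMethod):

import base64
import hashlib

def digest(hash_name, data):
    # Hash the canonicalized bytes and base64-encode the result,
    # mirroring what SignatureContext.digest does.
    h = hashlib.new(hash_name)
    h.update(data)
    return base64.b64encode(h.digest())

print(digest("sha256", b"<SignedInfo/>"))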
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.fill_x509_data
|
python
|
def fill_x509_data(self, x509_data):
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
|
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L76-L112
|
[
"def b64_print(s):\n \"\"\"\n Prints a string with spaces at every b64_intro characters\n :param s: String to print\n :return: String\n \"\"\"\n if USING_PYTHON2:\n string = str(s)\n else:\n string = str(s, 'utf8')\n return '\\n'.join(\n string[pos:pos + b64_intro] for pos in range(0, len(string), b64_intro)\n )\n",
"def get_rdns_name(rdns):\n \"\"\"\n Gets the rdns String name\n :param rdns: RDNS object\n :type rdns: cryptography.x509.RelativeDistinguishedName\n :return: RDNS name\n \"\"\"\n name = ''\n for rdn in rdns:\n for attr in rdn._attributes:\n if len(name) > 0:\n name = name + ','\n if attr.oid in OID_NAMES:\n name = name + OID_NAMES[attr.oid]\n else:\n name = name + attr.oid._name\n name = name + '=' + attr.value\n return name\n",
"def fill_x509_issuer_name(self, x509_issuer_serial):\n \"\"\"\n Fills the X509IssuerSerial node\n :param x509_issuer_serial: X509IssuerSerial node\n :type x509_issuer_serial: lxml.etree.Element\n :return: None\n \"\"\"\n x509_issuer_name = x509_issuer_serial.find(\n 'ds:X509IssuerName', namespaces=constants.NS_MAP\n )\n if x509_issuer_name is not None:\n x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)\n x509_issuer_number = x509_issuer_serial.find(\n 'ds:X509SerialNumber', namespaces=constants.NS_MAP\n )\n if x509_issuer_number is not None:\n x509_issuer_number.text = str(self.x509.serial_number)\n"
] |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: object to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
It returns the node of the specified URI
:param uri: URI of the referenced element
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI in a String
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be readed')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute the digest, False to verify it
:type sign: bool
:return: None when signing; bool digest-match result when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: True to compute the signature, False to verify it
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
This function fills the context public_key, private_key and x509 from
PKCS12 Object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.fill_x509_issuer_name
|
python
|
def fill_x509_issuer_name(self, x509_issuer_serial):
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
|
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L114-L130
|
[
"def get_rdns_name(rdns):\n \"\"\"\n Gets the rdns String name\n :param rdns: RDNS object\n :type rdns: cryptography.x509.RelativeDistinguishedName\n :return: RDNS name\n \"\"\"\n name = ''\n for rdn in rdns:\n for attr in rdn._attributes:\n if len(name) > 0:\n name = name + ','\n if attr.oid in OID_NAMES:\n name = name + OID_NAMES[attr.oid]\n else:\n name = name + attr.oid._name\n name = name + '=' + attr.value\n return name\n"
] |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: bytes
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the given URI
:param uri: URI of the referenced node (empty for the whole document, or '#id')
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: canonicalized referenced node as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be readed')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute the digest, False to verify it
:type sign: bool
:return: None when signing; bool digest-match result when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: True to compute the signature, False to verify it
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
This function fills the context public_key, private_key and x509 from
PKCS12 Object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
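A minimal usage sketch for the fill_x509_issuer_name entry above: build the ds:X509IssuerSerial node the method expects, load a certificate with the cryptography package (recent versions accept the PEM bytes directly), and let the context fill the children. The cert.pem path is a placeholder, not part of this dataset.

from lxml import etree
from cryptography import x509
import xmlsig

DS = "http://www.w3.org/2000/09/xmldsig#"  # namespace bound to ds: in constants.NS_MAP

# X509IssuerSerial with empty children for the method to populate
issuer_serial = etree.Element("{%s}X509IssuerSerial" % DS)
etree.SubElement(issuer_serial, "{%s}X509IssuerName" % DS)
etree.SubElement(issuer_serial, "{%s}X509SerialNumber" % DS)

ctx = xmlsig.SignatureContext()
with open("cert.pem", "rb") as fh:  # placeholder certificate path
    ctx.x509 = x509.load_pem_x509_certificate(fh.read())

ctx.fill_x509_issuer_name(issuer_serial)
print(etree.tostring(issuer_serial, pretty_print=True).decode())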
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.fill_signed_info
|
python
|
def fill_signed_info(self, signed_info):
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
|
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L132-L142
|
[
"def calculate_reference(self, reference, sign=True):\n \"\"\"\n Calculates or verifies the digest of the reference\n :param reference: Reference node\n :type reference: lxml.etree.Element\n :param sign: It marks if we must sign or check a signature\n :type sign: bool\n :return: None\n \"\"\"\n node = self.get_uri(reference.get('URI', ''), reference)\n transforms = reference.find(\n 'ds:Transforms', namespaces=constants.NS_MAP\n )\n if transforms is not None:\n for transform in transforms.findall(\n 'ds:Transform', namespaces=constants.NS_MAP\n ):\n node = self.transform(transform, node)\n digest_value = self.digest(\n reference.find(\n 'ds:DigestMethod', namespaces=constants.NS_MAP\n ).get('Algorithm'),\n node\n )\n if not sign:\n return digest_value.decode() == reference.find(\n 'ds:DigestValue', namespaces=constants.NS_MAP\n ).text\n\n reference.find(\n 'ds:DigestValue', namespaces=constants.NS_MAP\n ).text = digest_value\n"
] |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: bytes
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the given URI
:param uri: URI of the referenced node (empty for the whole document, or '#id')
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: canonicalized referenced node as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be readed')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute the digest, False to verify it
:type sign: bool
:return: None when signing; bool digest-match result when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: True to compute the signature, False to verify it
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
This function fills the context public_key, private_key and x509 from
PKCS12 Object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
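A signing sketch showing where fill_signed_info sits in the flow: sign() digests every ds:Reference via fill_signed_info before computing ds:SignatureValue. The xmlsig.template helpers, the RSA-SHA1/SHA1 constant names, and the key.p12 path/passphrase are assumptions based on common usage of this library, not shown in this entry.

from lxml import etree
from OpenSSL import crypto
import xmlsig

root = etree.fromstring("<root><data>hello</data></root>")

# Assumed template helpers: build an enveloped-signature skeleton with one
# Reference over the whole document (uri="").
signature = xmlsig.template.create(
    xmlsig.constants.TransformInclC14N,
    xmlsig.constants.TransformRsaSha1,
)
ref = xmlsig.template.add_reference(
    signature, xmlsig.constants.TransformSha1, uri=""
)
xmlsig.template.add_transform(ref, xmlsig.constants.TransformEnveloped)
root.append(signature)

ctx = xmlsig.SignatureContext()
with open("key.p12", "rb") as fh:  # placeholder PKCS12 file and passphrase
    ctx.load_pkcs12(crypto.load_pkcs12(fh.read(), b"password"))
ctx.sign(signature)  # fill_signed_info() digests each Reference, then the value is signed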
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.verify
|
python
|
def verify(self, node):
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
|
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L144-L169
|
[
"def calculate_reference(self, reference, sign=True):\n \"\"\"\n Calculates or verifies the digest of the reference\n :param reference: Reference node\n :type reference: lxml.etree.Element\n :param sign: It marks if we must sign or check a signature\n :type sign: bool\n :return: None\n \"\"\"\n node = self.get_uri(reference.get('URI', ''), reference)\n transforms = reference.find(\n 'ds:Transforms', namespaces=constants.NS_MAP\n )\n if transforms is not None:\n for transform in transforms.findall(\n 'ds:Transform', namespaces=constants.NS_MAP\n ):\n node = self.transform(transform, node)\n digest_value = self.digest(\n reference.find(\n 'ds:DigestMethod', namespaces=constants.NS_MAP\n ).get('Algorithm'),\n node\n )\n if not sign:\n return digest_value.decode() == reference.find(\n 'ds:DigestValue', namespaces=constants.NS_MAP\n ).text\n\n reference.find(\n 'ds:DigestValue', namespaces=constants.NS_MAP\n ).text = digest_value\n",
"def calculate_signature(self, node, sign=True):\n \"\"\"\n Calculate or verifies the signature\n :param node: Signature node\n :type node: lxml.etree.Element\n :param sign: It checks if it must calculate or verify\n :type sign: bool\n :return: None\n \"\"\"\n signed_info_xml = node.find('ds:SignedInfo',\n namespaces=constants.NS_MAP)\n canonicalization_method = signed_info_xml.find(\n 'ds:CanonicalizationMethod', namespaces=constants.NS_MAP\n ).get('Algorithm')\n signature_method = signed_info_xml.find(\n 'ds:SignatureMethod', namespaces=constants.NS_MAP\n ).get('Algorithm')\n if signature_method not in constants.TransformUsageSignatureMethod:\n raise Exception('Method ' + signature_method + ' not accepted')\n signature = constants.TransformUsageSignatureMethod[signature_method]\n signed_info = self.canonicalization(\n canonicalization_method, signed_info_xml\n )\n if not sign:\n signature_value = node.find('ds:SignatureValue',\n namespaces=constants.NS_MAP).text\n public_key = signature['method'].get_public_key(node, self)\n signature['method'].verify(\n signature_value,\n signed_info,\n public_key,\n signature['digest']\n )\n else:\n node.find(\n 'ds:SignatureValue', namespaces=constants.NS_MAP\n ).text = b64_print(base64.b64encode(\n signature['method'].sign(\n signed_info,\n self.private_key,\n signature['digest']\n )\n ))\n"
] |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: bytes
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the given URI
:param uri: URI of the referenced node (empty for the whole document, or '#id')
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: canonicalized referenced node as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be readed')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute the digest, False to verify it
:type sign: bool
:return: None when signing; bool digest-match result when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: True to compute the signature, False to verify it
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
This function fills the context public_key, private_key and x509 from
PKCS12 Object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
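A verification sketch for the verify entry above (signed.xml is a placeholder path). No key material has to be loaded up front: calculate_signature recovers the public key from the signature's KeyInfo via the method's get_public_key.

from lxml import etree
import xmlsig

tree = etree.parse("signed.xml")  # placeholder input
signature = tree.getroot().find(
    ".//{http://www.w3.org/2000/09/xmldsig#}Signature"
)

ctx = xmlsig.SignatureContext()
# verify() schema-validates the node, re-digests every ds:Reference and
# checks ds:SignatureValue; it raises an Exception on any mismatch.
ctx.verify(signature)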
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.transform
|
python
|
def transform(self, transform, node):
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
|
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L171-L205
|
[
"def canonicalization(self, method, node):\n \"\"\"\n Canonicalizes a node following the method\n :param method: Method identification\n :type method: str\n :param node: object to canonicalize\n :type node: str\n :return: Canonicalized node in a String\n \"\"\"\n if method not in constants.TransformUsageC14NMethod:\n raise Exception('Method not allowed: ' + method)\n c14n_method = constants.TransformUsageC14NMethod[method]\n return etree.tostring(\n node,\n method=c14n_method['method'],\n with_comments=c14n_method['comments'],\n exclusive=c14n_method['exclusive']\n )\n"
] |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: bytes
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the given URI
:param uri: URI of the referenced node (empty for the whole document, or '#id')
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: canonicalized referenced node as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be readed')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute the digest, False to verify it
:type sign: bool
:return: None when signing; bool digest-match result when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: True to compute the signature, False to verify it
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
This function fills the context public_key, private_key and x509 from
PKCS12 Object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
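A standalone sketch of what the TransformEnveloped branch of transform does, written with plain lxml: remove the ds:Signature element, then serialize with inclusive C14N, which is exactly the pair of steps the method performs through canonicalization.

from lxml import etree

DS = "http://www.w3.org/2000/09/xmldsig#"
doc = etree.fromstring(
    '<root><data>hello</data><Signature xmlns="%s"/></root>' % DS
)

# Enveloped transform: drop the Signature element itself ...
doc.remove(doc.find("{%s}Signature" % DS))
# ... then inclusive C14N without comments, as TransformInclC14N specifies.
print(etree.tostring(doc, method="c14n", exclusive=False, with_comments=False))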
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.canonicalization
|
python
|
def canonicalization(self, method, node):
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
|
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L207-L224
| null |
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method (algorithm URI) to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node in a String
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: bytes
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the given URI
:param uri: URI of the referenced node (empty for the whole document, or '#id')
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: canonicalized referenced node as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be readed')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: True to compute the digest, False to verify it
:type sign: bool
:return: None when signing; bool digest-match result when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: Whether to calculate (True) or verify (False) the signature
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
Fills the context's public_key, private_key and x509 from a
PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.digest
|
python
|
def digest(self, method, node):
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
|
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L226-L239
| null |
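For context, here is a minimal standalone sketch of what SignatureContext.digest does: the algorithm URI is looked up to get a hashlib name, the canonicalized bytes are hashed, and the result is base64-encoded. The URI-to-name table below is an assumption standing in for constants.TransformUsageDigestMethod, not the library's actual mapping.

import base64
import hashlib

# Assumed mapping, standing in for constants.TransformUsageDigestMethod
DIGEST_METHODS = {
    "http://www.w3.org/2000/09/xmldsig#sha1": "sha1",
    "http://www.w3.org/2001/04/xmlenc#sha256": "sha256",
}

def digest(method, data):
    # Reject algorithm URIs outside the allowed table
    if method not in DIGEST_METHODS:
        raise Exception("Method not allowed")
    h = hashlib.new(DIGEST_METHODS[method])
    h.update(data)
    return base64.b64encode(h.digest())

print(digest("http://www.w3.org/2001/04/xmlenc#sha256", b"<Data/>"))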
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node as a string
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: Node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def get_uri(self, uri, reference):
"""
Returns the node referenced by the specified URI
:param uri: URI of the reference
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be read')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: Whether to compute the digest (True) or verify it (False)
:type sign: bool
:return: None when signing, bool when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: Whether to calculate (True) or verify (False) the signature
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
Fills the context's public_key, private_key and x509 from a
PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.get_uri
|
python
|
def get_uri(self, uri, reference):
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be read')
|
Returns the node referenced by the specified URI
:param uri: URI of the reference
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI as a string
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L241-L272
|
[
"def canonicalization(self, method, node):\n \"\"\"\n Canonicalizes a node following the method\n :param method: Method identification\n :type method: str\n :param node: object to canonicalize\n :type node: str\n :return: Canonicalized node in a String\n \"\"\"\n if method not in constants.TransformUsageC14NMethod:\n raise Exception('Method not allowed: ' + method)\n c14n_method = constants.TransformUsageC14NMethod[method]\n return etree.tostring(\n node,\n method=c14n_method['method'],\n with_comments=c14n_method['comments'],\n exclusive=c14n_method['exclusive']\n )\n",
"def check_uri_attr(self, node, xpath_query, uri, attr):\n return node.xpath(xpath_query.format(attr), uri=uri.lstrip(\"#\"))\n"
] |
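As a sketch of the same-document URI resolution used by get_uri above, with lxml's XPath variable binding; the XML document and its Id attribute are made up for illustration.

from lxml import etree

doc = etree.fromstring('<root><part Id="data"><v>1</v></part></root>')

# Same query shape as get_uri: match any attribute whose local name
# is "Id" and whose value equals the fragment minus the leading "#".
uri = "#data"
results = doc.xpath("//*[@*[local-name() = 'Id']=$uri]", uri=uri.lstrip("#"))
assert len(results) == 1  # more than one hit would be ambiguous
print(etree.tostring(results[0], method="c14n"))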
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node as a string
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: Node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: Whether to compute the digest (True) or verify it (False)
:type sign: bool
:return: None when signing, bool when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: Whether to calculate (True) or verify (False) the signature
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
Fills the context's public_key, private_key and x509 from a
PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.calculate_reference
|
python
|
def calculate_reference(self, reference, sign=True):
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
|
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: Whether to compute the digest (True) or verify it (False)
:type sign: bool
:return: None when signing, bool when verifying
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L277-L308
|
[
"def transform(self, transform, node):\n \"\"\"\n Transforms a node following the transform especification\n :param transform: Transform node\n :type transform: lxml.etree.Element\n :param node: Element to transform\n :type node: str\n :return: Transformed node in a String\n \"\"\"\n method = transform.get('Algorithm')\n if method not in constants.TransformUsageDSigTransform:\n raise Exception('Method not allowed')\n # C14N methods are allowed\n if method in constants.TransformUsageC14NMethod:\n return self.canonicalization(method, etree.fromstring(node))\n # Enveloped method removes the Signature Node from the element\n if method == constants.TransformEnveloped:\n tree = transform.getroottree()\n root = etree.fromstring(node)\n signature = root.find(\n tree.getelementpath(\n transform.getparent().getparent().getparent().getparent()\n )\n )\n root.remove(signature)\n return self.canonicalization(\n constants.TransformInclC14N, root)\n if method == constants.TransformBase64:\n try:\n root = etree.fromstring(node)\n return base64.b64decode(root.text)\n except Exception:\n return base64.b64decode(node)\n\n raise Exception('Method not found')\n",
"def digest(self, method, node):\n \"\"\"\n Returns the digest of an object from a method name\n :param method: hash method\n :type method: str\n :param node: Object to hash\n :type node: str\n :return: hash result\n \"\"\"\n if method not in constants.TransformUsageDigestMethod:\n raise Exception('Method not allowed')\n lib = hashlib.new(constants.TransformUsageDigestMethod[method])\n lib.update(node)\n return base64.b64encode(lib.digest())\n",
"def get_uri(self, uri, reference):\n \"\"\"\n It returns the node of the specified URI\n :param uri: uri of the \n :type uri: str\n :param reference: Reference node\n :type reference: etree.lxml.Element\n :return: Element of the URI in a String\n \"\"\"\n if uri == \"\":\n return self.canonicalization(\n constants.TransformInclC14N, reference.getroottree()\n )\n if uri.startswith(\"#\"):\n query = \"//*[@*[local-name() = '{}' ]=$uri]\"\n node = reference.getroottree()\n results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)\n if len(results) == 0:\n results = self.check_uri_attr(node, query, uri, 'ID')\n if len(results) == 0:\n results = self.check_uri_attr(node, query, uri, 'Id')\n if len(results) == 0:\n results = self.check_uri_attr(node, query, uri, 'id')\n if len(results) > 1:\n raise Exception(\n \"Ambiguous reference URI {} resolved to {} nodes\".format(\n uri, len(results)))\n elif len(results) == 1:\n return self.canonicalization(\n constants.TransformInclC14N, results[0]\n )\n raise Exception('URI \"' + uri + '\" cannot be readed')\n"
] |
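A sketch of the verification branch of calculate_reference (sign=False): the referenced node is canonicalized, digested, and the base64 result is compared with the stored DigestValue text. The XML snippet and the SHA-256 choice are assumptions for illustration.

import base64
import hashlib
from lxml import etree

node = etree.fromstring("<a>hi</a>")
c14n = etree.tostring(node, method="c14n")
# Pretend this came from the ds:DigestValue node of the Reference
stored_digest = base64.b64encode(hashlib.sha256(c14n).digest()).decode()

# The comparison calculate_reference performs when sign=False
recomputed = base64.b64encode(hashlib.sha256(c14n).digest())
print(recomputed.decode() == stored_digest)  # True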
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node as a string
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: Node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the specified URI
:param uri: URI of the reference
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be read')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: Whether to calculate (True) or verify (False) the signature
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
def load_pkcs12(self, key):
"""
Fills the context's public_key, private_key and x509 from a
PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.calculate_signature
|
python
|
def calculate_signature(self, node, sign=True):
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
|
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: Whether to calculate (True) or verify (False) the signature
:type sign: bool
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L310-L352
|
[
"def b64_print(s):\n \"\"\"\n Prints a string with spaces at every b64_intro characters\n :param s: String to print\n :return: String\n \"\"\"\n if USING_PYTHON2:\n string = str(s)\n else:\n string = str(s, 'utf8')\n return '\\n'.join(\n string[pos:pos + b64_intro] for pos in range(0, len(string), b64_intro)\n )\n",
"def canonicalization(self, method, node):\n \"\"\"\n Canonicalizes a node following the method\n :param method: Method identification\n :type method: str\n :param node: object to canonicalize\n :type node: str\n :return: Canonicalized node in a String\n \"\"\"\n if method not in constants.TransformUsageC14NMethod:\n raise Exception('Method not allowed: ' + method)\n c14n_method = constants.TransformUsageC14NMethod[method]\n return etree.tostring(\n node,\n method=c14n_method['method'],\n with_comments=c14n_method['comments'],\n exclusive=c14n_method['exclusive']\n )\n"
] |
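For the RSA signature methods, signature['method'].sign and verify amount to PKCS#1 v1.5 over the canonicalized SignedInfo bytes. A hedged sketch with the cryptography library, using a throwaway key rather than the context's loaded key:

from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding, rsa

private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
signed_info = b"<SignedInfo/>"  # canonicalized bytes in real use

signature_value = private_key.sign(
    signed_info, padding.PKCS1v15(), hashes.SHA256()
)
# Raises cryptography.exceptions.InvalidSignature on mismatch
private_key.public_key().verify(
    signature_value, signed_info, padding.PKCS1v15(), hashes.SHA256()
)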
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node as a string
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: Node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the specified URI
:param uri: URI of the reference
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be readed')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: Whether to compute the digest (True) or verify it (False)
:type sign: bool
:return: None when signing, bool when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def load_pkcs12(self, key):
"""
Fills the context's public_key, private_key and x509 from a
PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
"""
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
etobella/python-xmlsig
|
src/xmlsig/signature_context.py
|
SignatureContext.load_pkcs12
|
python
|
def load_pkcs12(self, key):
self.x509 = key.get_certificate().to_cryptography()
self.public_key = key.get_certificate().to_cryptography().public_key()
self.private_key = key.get_privatekey().to_cryptography_key()
|
Fills the context's public_key, private_key and x509 from a
PKCS12 object
:param key: the PKCS12 Object
:type key: OpenSSL.crypto.PKCS12
:return: None
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/signature_context.py#L354-L364
| null |
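Typical usage of load_pkcs12, assuming a hypothetical keystore.p12 file and password; note that OpenSSL.crypto.load_pkcs12 is deprecated in recent pyOpenSSL releases, so this sketch targets older versions.

from OpenSSL import crypto
import xmlsig

ctx = xmlsig.SignatureContext()
with open("keystore.p12", "rb") as f:  # hypothetical keystore and password
    ctx.load_pkcs12(crypto.load_pkcs12(f.read(), b"password"))
# ctx.private_key, ctx.public_key and ctx.x509 are now populated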
class SignatureContext(object):
"""
Signature context is used to sign and verify Signature nodes with keys
"""
def __init__(self):
self.x509 = None
self.crl = None
self.private_key = None
self.public_key = None
self.key_name = None
def sign(self, node):
"""
Signs a Signature node
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
signature_method = signed_info.find('ds:SignatureMethod',
namespaces=constants.NS_MAP).get(
'Algorithm')
key_info = node.find('ds:KeyInfo', namespaces=constants.NS_MAP)
if key_info is not None:
self.fill_key_info(key_info, signature_method)
self.fill_signed_info(signed_info)
self.calculate_signature(node)
def fill_key_info(self, key_info, signature_method):
"""
Fills the KeyInfo node
:param key_info: KeyInfo node
:type key_info: lxml.etree.Element
:param signature_method: Signature method to use
:type signature_method: str
:return: None
"""
x509_data = key_info.find('ds:X509Data', namespaces=constants.NS_MAP)
if x509_data is not None:
self.fill_x509_data(x509_data)
key_name = key_info.find('ds:KeyName', namespaces=constants.NS_MAP)
if key_name is not None and self.key_name is not None:
key_name.text = self.key_name
key_value = key_info.find('ds:KeyValue', namespaces=constants.NS_MAP)
if key_value is not None:
key_value.text = '\n'
signature = constants.TransformUsageSignatureMethod[
signature_method
]
key = self.public_key
if self.public_key is None:
key = self.private_key.public_key()
if not isinstance(
key, signature['method'].public_key_class
):
raise Exception('Key not compatible with signature method')
signature['method'].key_value(key_value, key)
def fill_x509_data(self, x509_data):
"""
Fills the X509Data Node
:param x509_data: X509Data Node
:type x509_data: lxml.etree.Element
:return: None
"""
x509_issuer_serial = x509_data.find(
'ds:X509IssuerSerial', namespaces=constants.NS_MAP
)
if x509_issuer_serial is not None:
self.fill_x509_issuer_name(x509_issuer_serial)
x509_crl = x509_data.find('ds:X509CRL', namespaces=constants.NS_MAP)
if x509_crl is not None and self.crl is not None:
x509_data.text = base64.b64encode(
self.crl.public_bytes(serialization.Encoding.DER)
)
x509_subject = x509_data.find(
'ds:X509SubjectName', namespaces=constants.NS_MAP
)
if x509_subject is not None:
x509_subject.text = get_rdns_name(self.x509.subject.rdns)
x509_ski = x509_data.find('ds:X509SKI', namespaces=constants.NS_MAP)
if x509_ski is not None:
x509_ski.text = base64.b64encode(
self.x509.extensions.get_extension_for_oid(
ExtensionOID.SUBJECT_KEY_IDENTIFIER
).value.digest)
x509_certificate = x509_data.find(
'ds:X509Certificate', namespaces=constants.NS_MAP
)
if x509_certificate is not None:
s = base64.b64encode(
self.x509.public_bytes(encoding=serialization.Encoding.DER)
)
x509_certificate.text = b64_print(s)
def fill_x509_issuer_name(self, x509_issuer_serial):
"""
Fills the X509IssuerSerial node
:param x509_issuer_serial: X509IssuerSerial node
:type x509_issuer_serial: lxml.etree.Element
:return: None
"""
x509_issuer_name = x509_issuer_serial.find(
'ds:X509IssuerName', namespaces=constants.NS_MAP
)
if x509_issuer_name is not None:
x509_issuer_name.text = get_rdns_name(self.x509.issuer.rdns)
x509_issuer_number = x509_issuer_serial.find(
'ds:X509SerialNumber', namespaces=constants.NS_MAP
)
if x509_issuer_number is not None:
x509_issuer_number.text = str(self.x509.serial_number)
def fill_signed_info(self, signed_info):
"""
Fills the SignedInfo node
:param signed_info: SignedInfo node
:type signed_info: lxml.etree.Element
:return: None
"""
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
self.calculate_reference(reference, True)
def verify(self, node):
"""
Verifies a signature
:param node: Signature node
:type node: lxml.etree.Element
:return: None
"""
# Added XSD Validation
with open(path.join(
path.dirname(__file__), "data/xmldsig-core-schema.xsd"
), "rb") as file:
schema = etree.XMLSchema(etree.fromstring(file.read()))
schema.assertValid(node)
# Validates reference value
signed_info = node.find('ds:SignedInfo', namespaces=constants.NS_MAP)
for reference in signed_info.findall(
'ds:Reference', namespaces=constants.NS_MAP
):
if not self.calculate_reference(reference, False):
raise Exception(
'Reference with URI:"' +
reference.get("URI", '') +
'" failed'
)
# Validates signature value
self.calculate_signature(node, False)
def transform(self, transform, node):
"""
Transforms a node following the transform specification
:param transform: Transform node
:type transform: lxml.etree.Element
:param node: Element to transform
:type node: str
:return: Transformed node as a string
"""
method = transform.get('Algorithm')
if method not in constants.TransformUsageDSigTransform:
raise Exception('Method not allowed')
# C14N methods are allowed
if method in constants.TransformUsageC14NMethod:
return self.canonicalization(method, etree.fromstring(node))
# Enveloped method removes the Signature Node from the element
if method == constants.TransformEnveloped:
tree = transform.getroottree()
root = etree.fromstring(node)
signature = root.find(
tree.getelementpath(
transform.getparent().getparent().getparent().getparent()
)
)
root.remove(signature)
return self.canonicalization(
constants.TransformInclC14N, root)
if method == constants.TransformBase64:
try:
root = etree.fromstring(node)
return base64.b64decode(root.text)
except Exception:
return base64.b64decode(node)
raise Exception('Method not found')
def canonicalization(self, method, node):
"""
Canonicalizes a node following the method
:param method: Method identification
:type method: str
:param node: Node to canonicalize
:type node: lxml.etree.Element
:return: Canonicalized node in a String
"""
if method not in constants.TransformUsageC14NMethod:
raise Exception('Method not allowed: ' + method)
c14n_method = constants.TransformUsageC14NMethod[method]
return etree.tostring(
node,
method=c14n_method['method'],
with_comments=c14n_method['comments'],
exclusive=c14n_method['exclusive']
)
def digest(self, method, node):
"""
Returns the digest of an object from a method name
:param method: hash method
:type method: str
:param node: Object to hash
:type node: str
:return: hash result
"""
if method not in constants.TransformUsageDigestMethod:
raise Exception('Method not allowed')
lib = hashlib.new(constants.TransformUsageDigestMethod[method])
lib.update(node)
return base64.b64encode(lib.digest())
def get_uri(self, uri, reference):
"""
Returns the node referenced by the specified URI
:param uri: URI of the reference
:type uri: str
:param reference: Reference node
:type reference: lxml.etree.Element
:return: Element of the URI as a string
"""
if uri == "":
return self.canonicalization(
constants.TransformInclC14N, reference.getroottree()
)
if uri.startswith("#"):
query = "//*[@*[local-name() = '{}' ]=$uri]"
node = reference.getroottree()
results = self.check_uri_attr(node, query, uri, constants.ID_ATTR)
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'ID')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'Id')
if len(results) == 0:
results = self.check_uri_attr(node, query, uri, 'id')
if len(results) > 1:
raise Exception(
"Ambiguous reference URI {} resolved to {} nodes".format(
uri, len(results)))
elif len(results) == 1:
return self.canonicalization(
constants.TransformInclC14N, results[0]
)
raise Exception('URI "' + uri + '" cannot be read')
def check_uri_attr(self, node, xpath_query, uri, attr):
return node.xpath(xpath_query.format(attr), uri=uri.lstrip("#"))
def calculate_reference(self, reference, sign=True):
"""
Calculates or verifies the digest of the reference
:param reference: Reference node
:type reference: lxml.etree.Element
:param sign: Whether to compute the digest (True) or verify it (False)
:type sign: bool
:return: None when signing, bool when verifying
"""
node = self.get_uri(reference.get('URI', ''), reference)
transforms = reference.find(
'ds:Transforms', namespaces=constants.NS_MAP
)
if transforms is not None:
for transform in transforms.findall(
'ds:Transform', namespaces=constants.NS_MAP
):
node = self.transform(transform, node)
digest_value = self.digest(
reference.find(
'ds:DigestMethod', namespaces=constants.NS_MAP
).get('Algorithm'),
node
)
if not sign:
return digest_value.decode() == reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text
reference.find(
'ds:DigestValue', namespaces=constants.NS_MAP
).text = digest_value
def calculate_signature(self, node, sign=True):
"""
Calculates or verifies the signature
:param node: Signature node
:type node: lxml.etree.Element
:param sign: Whether to calculate (True) or verify (False) the signature
:type sign: bool
:return: None
"""
signed_info_xml = node.find('ds:SignedInfo',
namespaces=constants.NS_MAP)
canonicalization_method = signed_info_xml.find(
'ds:CanonicalizationMethod', namespaces=constants.NS_MAP
).get('Algorithm')
signature_method = signed_info_xml.find(
'ds:SignatureMethod', namespaces=constants.NS_MAP
).get('Algorithm')
if signature_method not in constants.TransformUsageSignatureMethod:
raise Exception('Method ' + signature_method + ' not accepted')
signature = constants.TransformUsageSignatureMethod[signature_method]
signed_info = self.canonicalization(
canonicalization_method, signed_info_xml
)
if not sign:
signature_value = node.find('ds:SignatureValue',
namespaces=constants.NS_MAP).text
public_key = signature['method'].get_public_key(node, self)
signature['method'].verify(
signature_value,
signed_info,
public_key,
signature['digest']
)
else:
node.find(
'ds:SignatureValue', namespaces=constants.NS_MAP
).text = b64_print(base64.b64encode(
signature['method'].sign(
signed_info,
self.private_key,
signature['digest']
)
))
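A minimal sketch of the canonicalization() + digest() pair above, assuming only lxml and the standard library; the sample XML and the 'sha256' method name are illustrative:

import base64
import hashlib
from lxml import etree

doc = etree.fromstring('<root xmlns:a="urn:a"><child a:x="1">hi</child></root>')
# exclusive C14N without comments, one of the usual C14N method variants
c14n = etree.tostring(doc, method='c14n', exclusive=True, with_comments=False)
# hash the canonical bytes and base64-encode the digest, as digest() does
print(base64.b64encode(hashlib.new('sha256', c14n).digest()).decode())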
|
etobella/python-xmlsig
|
src/xmlsig/algorithms/base.py
|
Algorithm.get_public_key
|
python
|
def get_public_key(key_info, ctx):
x509_certificate = key_info.find(
'ds:KeyInfo/ds:X509Data/ds:X509Certificate',
namespaces={'ds': ns.DSigNs}
)
if x509_certificate is not None:
return load_der_x509_certificate(
base64.b64decode(x509_certificate.text),
default_backend()
).public_key()
if ctx.public_key is not None:
return ctx.public_key
if isinstance(ctx.private_key, (str, bytes)):
return ctx.private_key
return ctx.private_key.public_key()
|
Gets the public key if it's defined in the X509Certificate node. Otherwise,
falls back to the context's public_key element
:param key_info: Signature node
:type key_info: lxml.etree.Element
:return: Public key to use
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/algorithms/base.py#L31-L52
| null |
class Algorithm(object):
private_key_class = None
public_key_class = None
@staticmethod
def sign(data, private_key, digest):
raise Exception("Sign function must be redefined")
@staticmethod
def verify(signature_value, data, public_key, digest):
raise Exception("Verify function must be redefined")
@staticmethod
def key_value(node, public_key):
raise Exception("Key Value function must be redefined")
@staticmethod
|
etobella/python-xmlsig
|
src/xmlsig/utils.py
|
b64_print
|
python
|
def b64_print(s):
if USING_PYTHON2:
string = str(s)
else:
string = str(s, 'utf8')
return '\n'.join(
string[pos:pos + b64_intro] for pos in range(0, len(string), b64_intro)
)
|
Wraps a string, inserting a newline every b64_intro (64) characters
:param s: String (bytes or str) to wrap
:return: Wrapped string
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/utils.py#L27-L39
| null |
# -*- coding: utf-8 -*-
# © 2017 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import struct
import sys
from cryptography.x509 import oid
from lxml import etree
OID_NAMES = {
oid.NameOID.COMMON_NAME: 'CN',
oid.NameOID.COUNTRY_NAME: 'C',
oid.NameOID.DOMAIN_COMPONENT: 'DC',
oid.NameOID.EMAIL_ADDRESS: 'E',
oid.NameOID.GIVEN_NAME: 'G',
oid.NameOID.LOCALITY_NAME: 'L',
oid.NameOID.ORGANIZATION_NAME: 'O',
oid.NameOID.ORGANIZATIONAL_UNIT_NAME: 'OU',
oid.NameOID.SURNAME: 'SN'
}
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
b64_intro = 64
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
if USING_PYTHON2:
n = long(n) # noqa
pack = struct.pack
while n > 0:
s = pack(b'>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def os2ip(arr):
x_len = len(arr)
x = 0
for i in range(x_len):
if USING_PYTHON2:
val = struct.unpack('B', arr[i])[0]
else:
val = arr[i]
x = x + (val * pow(256, x_len - i - 1))
return x
def create_node(name, parent=None, ns='', tail=False, text=False):
"""
Creates a new node
:param name: Node name
:param parent: Node parent
:param ns: Namespace to use
:param tail: Tail to add
:param text: Text of the node
:return: New node
"""
node = etree.Element(etree.QName(ns, name))
if parent is not None:
parent.append(node)
if tail:
node.tail = tail
if text:
node.text = text
return node
def get_rdns_name(rdns):
"""
Gets the rdns String name
:param rdns: RDNS object
:type rdns: cryptography.x509.RelativeDistinguishedName
:return: RDNS name
"""
name = ''
for rdn in rdns:
for attr in rdn._attributes:
if len(name) > 0:
name = name + ','
if attr.oid in OID_NAMES:
name = name + OID_NAMES[attr.oid]
else:
name = name + attr.oid._name
name = name + '=' + attr.value
return name
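A quick illustration of the 64-character line wrapping that b64_print() performs, cross-checked against textwrap; purely a sketch of the documented behavior:

import textwrap

s = 'A' * 150
wrapped = '\n'.join(s[pos:pos + 64] for pos in range(0, len(s), 64))
# with no spaces in the input, textwrap breaks at exactly the same offsets
assert wrapped == '\n'.join(textwrap.wrap(s, 64))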
|
etobella/python-xmlsig
|
src/xmlsig/utils.py
|
create_node
|
python
|
def create_node(name, parent=None, ns='', tail=False, text=False):
node = etree.Element(etree.QName(ns, name))
if parent is not None:
parent.append(node)
if tail:
node.tail = tail
if text:
node.text = text
return node
|
Creates a new node
:param name: Node name
:param parent: Node parent
:param ns: Namespace to use
:param tail: Tail to add
:param text: Text of the node
:return: New node
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/utils.py#L85-L102
| null |
# -*- coding: utf-8 -*-
# © 2017 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import struct
import sys
from cryptography.x509 import oid
from lxml import etree
OID_NAMES = {
oid.NameOID.COMMON_NAME: 'CN',
oid.NameOID.COUNTRY_NAME: 'C',
oid.NameOID.DOMAIN_COMPONENT: 'DC',
oid.NameOID.EMAIL_ADDRESS: 'E',
oid.NameOID.GIVEN_NAME: 'G',
oid.NameOID.LOCALITY_NAME: 'L',
oid.NameOID.ORGANIZATION_NAME: 'O',
oid.NameOID.ORGANIZATIONAL_UNIT_NAME: 'OU',
oid.NameOID.SURNAME: 'SN'
}
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
b64_intro = 64
def b64_print(s):
"""
Wraps a string, inserting a newline every b64_intro (64) characters
:param s: String (bytes or str) to wrap
:return: Wrapped string
"""
if USING_PYTHON2:
string = str(s)
else:
string = str(s, 'utf8')
return '\n'.join(
string[pos:pos + b64_intro] for pos in range(0, len(string), b64_intro)
)
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
if USING_PYTHON2:
n = long(n) # noqa
pack = struct.pack
while n > 0:
s = pack(b'>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def os2ip(arr):
x_len = len(arr)
x = 0
for i in range(x_len):
if USING_PYTHON2:
val = struct.unpack('B', arr[i])[0]
else:
val = arr[i]
x = x + (val * pow(256, x_len - i - 1))
return x
def get_rdns_name(rdns):
"""
Gets the rdns String name
:param rdns: RDNS object
:type rdns: cryptography.x509.RelativeDistinguishedName
:return: RDNS name
"""
name = ''
for rdn in rdns:
for attr in rdn._attributes:
if len(name) > 0:
name = name + ','
if attr.oid in OID_NAMES:
name = name + OID_NAMES[attr.oid]
else:
name = name + attr.oid._name
name = name + '=' + attr.value
return name
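A small sketch of create_node()-style namespaced element construction with lxml; the XML-DSig namespace URI is the standard one, everything else is illustrative:

from lxml import etree

DSIG_NS = 'http://www.w3.org/2000/09/xmldsig#'
root = etree.Element(etree.QName(DSIG_NS, 'Signature'), nsmap={'ds': DSIG_NS})
etree.SubElement(root, etree.QName(DSIG_NS, 'SignedInfo')).tail = '\n'
print(etree.tostring(root, pretty_print=True).decode())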
|
etobella/python-xmlsig
|
src/xmlsig/utils.py
|
get_rdns_name
|
python
|
def get_rdns_name(rdns):
name = ''
for rdn in rdns:
for attr in rdn._attributes:
if len(name) > 0:
name = name + ','
if attr.oid in OID_NAMES:
name = name + OID_NAMES[attr.oid]
else:
name = name + attr.oid._name
name = name + '=' + attr.value
return name
|
Gets the rdns String name
:param rdns: RDNS object
:type rdns: cryptography.x509.RelativeDistinguishedName
:return: RDNS name
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/utils.py#L105-L122
| null |
# -*- coding: utf-8 -*-
# © 2017 Creu Blanca
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import struct
import sys
from cryptography.x509 import oid
from lxml import etree
OID_NAMES = {
oid.NameOID.COMMON_NAME: 'CN',
oid.NameOID.COUNTRY_NAME: 'C',
oid.NameOID.DOMAIN_COMPONENT: 'DC',
oid.NameOID.EMAIL_ADDRESS: 'E',
oid.NameOID.GIVEN_NAME: 'G',
oid.NameOID.LOCALITY_NAME: 'L',
oid.NameOID.ORGANIZATION_NAME: 'O',
oid.NameOID.ORGANIZATIONAL_UNIT_NAME: 'OU',
oid.NameOID.SURNAME: 'SN'
}
USING_PYTHON2 = True if sys.version_info < (3, 0) else False
b64_intro = 64
def b64_print(s):
"""
Wraps a string, inserting a newline every b64_intro (64) characters
:param s: String (bytes or str) to wrap
:return: Wrapped string
"""
if USING_PYTHON2:
string = str(s)
else:
string = str(s, 'utf8')
return '\n'.join(
string[pos:pos + b64_intro] for pos in range(0, len(string), b64_intro)
)
def long_to_bytes(n, blocksize=0):
"""long_to_bytes(n:long, blocksize:int) : string
Convert a long integer to a byte string.
If optional blocksize is given and greater than zero, pad the front of the
byte string with binary zeros so that the length is a multiple of
blocksize.
"""
# after much testing, this algorithm was deemed to be the fastest
s = b''
if USING_PYTHON2:
n = long(n) # noqa
pack = struct.pack
while n > 0:
s = pack(b'>I', n & 0xffffffff) + s
n = n >> 32
# strip off leading zeros
for i in range(len(s)):
if s[i] != b'\000'[0]:
break
else:
# only happens when n == 0
s = b'\000'
i = 0
s = s[i:]
# add back some pad bytes. this could be done more efficiently w.r.t. the
# de-padding being done above, but sigh...
if blocksize > 0 and len(s) % blocksize:
s = (blocksize - len(s) % blocksize) * b'\000' + s
return s
def os2ip(arr):
x_len = len(arr)
x = 0
for i in range(x_len):
if USING_PYTHON2:
val = struct.unpack('B', arr[i])[0]
else:
val = arr[i]
x = x + (val * pow(256, x_len - i - 1))
return x
def create_node(name, parent=None, ns='', tail=False, text=False):
"""
Creates a new node
:param name: Node name
:param parent: Node parent
:param ns: Namespace to use
:param tail: Tail to add
:param text: Text of the node
:return: New node
"""
node = etree.Element(etree.QName(ns, name))
if parent is not None:
parent.append(node)
if tail:
node.tail = tail
if text:
node.text = text
return node
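For comparison with get_rdns_name(), the cryptography package can render a subject name itself; a hedged sketch (rfc4514_string() may order and escape attributes slightly differently from the manual loop above):

from cryptography.x509 import Name, NameAttribute
from cryptography.x509.oid import NameOID

name = Name([
    NameAttribute(NameOID.ORGANIZATION_NAME, u'Example Corp'),
    NameAttribute(NameOID.COMMON_NAME, u'example.com'),
])
print(name.rfc4514_string())  # e.g. CN=example.com,O=Example Corp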
|
etobella/python-xmlsig
|
src/xmlsig/algorithms/rsa.py
|
RSAAlgorithm.get_public_key
|
python
|
def get_public_key(key_info, ctx):
key = key_info.find(
'ds:KeyInfo/ds:KeyValue/ds:RSAKeyValue', namespaces=NS_MAP
)
if key is not None:
n = os2ip(b64decode(key.find(
'ds:Modulus', namespaces=NS_MAP).text))
e = os2ip(b64decode(key.find(
'ds:Exponent', namespaces=NS_MAP).text))
return rsa.RSAPublicNumbers(e, n).public_key(default_backend())
return super(RSAAlgorithm, RSAAlgorithm).get_public_key(key_info, ctx)
|
Gets the public key if it's defined in the X509Certificate node. Otherwise,
falls back to the context's public_key element
:param key_info: Signature node
:type key_info: lxml.etree.Element
:return: Public key to use
|
train
|
https://github.com/etobella/python-xmlsig/blob/120a50935a4d4c2c972cfa3f8519bbce7e30d67b/src/xmlsig/algorithms/rsa.py#L60-L77
| null |
class RSAAlgorithm(Algorithm):
private_key_class = rsa.RSAPrivateKey
public_key_class = rsa.RSAPublicKey
@staticmethod
def sign(data, private_key, digest):
return private_key.sign(
data,
padding.PKCS1v15(),
digest()
)
@staticmethod
def verify(signature_value, data, public_key, digest):
public_key.verify(
b64decode(signature_value),
data,
padding.PKCS1v15(),
digest()
)
@staticmethod
def key_value(node, public_key):
result = create_node(
'RSAKeyValue', node, DSigNs, '\n', '\n'
)
create_node(
'Modulus',
result,
DSigNs,
tail='\n',
text=b64_print(b64encode(long_to_bytes(
public_key.public_numbers().n
)))
)
create_node(
'Exponent',
result,
DSigNs,
tail='\n',
text=b64encode(long_to_bytes(public_key.public_numbers().e))
)
return result
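A minimal sketch of the ds:RSAKeyValue path above: rebuild an RSA public key from base64 Modulus/Exponent text with RSAPublicNumbers. int.from_bytes is the Python 3 counterpart of the library's os2ip(); the helper name is illustrative:

from base64 import b64decode
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa

def rsa_key_from_key_value(modulus_b64, exponent_b64):
    n = int.from_bytes(b64decode(modulus_b64), 'big')  # os2ip equivalent
    e = int.from_bytes(b64decode(exponent_b64), 'big')
    return rsa.RSAPublicNumbers(e, n).public_key(default_backend())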
|
Aula13/poloniex
|
poloniex/concurrency.py
|
Semaphore.clear
|
python
|
def clear(self):
with self._cond:
to_notify = self._initial - self._value
self._value = self._initial
self._cond.notify(to_notify)
|
Release the semaphore from all outstanding acquisitions, setting the internal
counter back to its initial value. Notify an equivalent number
of waiting threads that they can run.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/concurrency.py#L96-L103
| null |
class Semaphore(object):
"""This class implements semaphore objects.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
This is a replica of the Python3 implementation with a convenience clear method.
The reason this was duplicated rather than subclassed is that on Python 2,
the necessary value attributes are hard-private instead of soft-private.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
self._cond = threading.Condition(threading.Lock())
self._initial = self._value = value
def acquire(self, blocking=True, timeout=None):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
When invoked with a timeout other than None, it will block for at
most timeout seconds. If acquire does not complete successfully in
that interval, return false. Return true otherwise.
"""
if not blocking and timeout is not None:
raise ValueError("can't specify timeout for non-blocking acquire")
rc = False
endtime = None
with self._cond:
while self._value == 0:
if not blocking:
break
if timeout is not None:
if endtime is None:
endtime = threading._time() + timeout
else:
timeout = endtime - threading._time()
if timeout <= 0:
break
self._cond.wait(timeout)
else:
self._value -= 1
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self._cond:
self._value += 1
self._cond.notify()
def __exit__(self, t, v, tb):
self.release()
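A usage sketch for the class above, showing how clear() differs from release() (assumes the Semaphore definition is in scope):

sem = Semaphore(2)
sem.acquire()
sem.acquire()
assert not sem.acquire(blocking=False)  # both permits consumed
sem.clear()                             # restore the full initial value of 2
assert sem.acquire(blocking=False)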
|
Aula13/poloniex
|
poloniex/poloniex.py
|
_api_wrapper
|
python
|
def _api_wrapper(fn):
def _convert(value):
if isinstance(value, _datetime.date):
return value.strftime('%s')
return value
@_six.wraps(fn)
def _fn(self, command, **params):
with self.startup_lock:
if self.timer.ident is None:
self.timer.setDaemon(True)
self.timer.start()
# sanitize the params by removing the None values
params = dict((key, _convert(value))
for key, value in _six.iteritems(params)
if value is not None)
self.semaphore.acquire()
resp = fn(self, command, **params)
try:
respdata = resp.json(object_hook=_AutoCastDict)
except Exception:
# raise a more specific HTTP error if available, else a generic decode error
resp.raise_for_status()
raise Exception('No JSON object could be decoded')
# check for 'error' then check for status due to Poloniex inconsistency
if 'error' in respdata:
raise PoloniexCommandException(respdata['error'])
resp.raise_for_status()
return respdata
return _fn
|
API function decorator that performs rate limiting and error checking.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L20-L55
| null |
import six as _six
import hmac as _hmac
import time as _time
import atexit as _atexit
import hashlib as _hashlib
import datetime as _datetime
import requests as _requests
import itertools as _itertools
import threading as _threading
from .concurrency import RecurrentTimer, Semaphore
from .utils import AutoCastDict as _AutoCastDict
from .exceptions import (PoloniexCredentialsException,
PoloniexCommandException)
_PUBLIC_URL = 'https://poloniex.com/public'
_PRIVATE_URL = 'https://poloniex.com/tradingApi'
class PoloniexPublic(object):
"""Client to connect to Poloniex public APIs"""
def __init__(self, public_url=_PUBLIC_URL, limit=6,
session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None):
"""Initialize Poloniex client."""
self._public_url = public_url
self.startup_lock = startup_lock or _threading.RLock()
self.semaphore = semaphore or Semaphore(limit)
self.timer = timer or RecurrentTimer(1.0, self.semaphore.clear)
self.session = session or session_class()
_atexit.register(self.__del__)
def __del__(self):
self.timer.cancel()
if self.timer.ident is not None: # timer was started
self.timer.join()
@_api_wrapper
def _public(self, command, **params):
"""Invoke the 'command' public API with optional params."""
params['command'] = command
response = self.session.get(self._public_url, params=params)
return response
def returnTicker(self):
"""Returns the ticker for all markets."""
return self._public('returnTicker')
def return24hVolume(self):
"""Returns the 24-hour volume for all markets, plus totals for
primary currencies."""
return self._public('return24hVolume')
def returnOrderBook(self, currencyPair='all', depth='50'):
"""Returns the order book for a given market, as well as a sequence
number for use with the Push API and an indicator specifying whether
the market is frozen. You may set currencyPair to "all" to get the
order books of all markets."""
return self._public('returnOrderBook', currencyPair=currencyPair,
depth=depth)
def returnTradeHistory(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return self._public('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end)
def returnChartData(self, currencyPair, period, start=0, end=2**32-1):
"""Returns candlestick chart data. Required GET parameters are
"currencyPair", "period" (candlestick period in seconds; valid values
are 300, 900, 1800, 7200, 14400, and 86400), "start", and "end".
"Start" and "end" are given in UNIX timestamp format and used to
specify the date range for the data returned."""
return self._public('returnChartData', currencyPair=currencyPair,
period=period, start=start, end=end)
def returnCurrencies(self):
"""Returns information about currencies."""
return self._public('returnCurrencies')
def returnLoanOrders(self, currency):
"""Returns the list of loan offers and demands for a given currency,
specified by the "currency" GET parameter."""
return self._public('returnLoanOrders', currency=currency)
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
|
Aula13/poloniex
|
poloniex/poloniex.py
|
PoloniexPublic._public
|
python
|
def _public(self, command, **params):
params['command'] = command
response = self.session.get(self._public_url, params=params)
return response
|
Invoke the 'command' public API with optional params.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L80-L84
| null |
class PoloniexPublic(object):
"""Client to connect to Poloniex public APIs"""
def __init__(self, public_url=_PUBLIC_URL, limit=6,
session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None):
"""Initialize Poloniex client."""
self._public_url = public_url
self.startup_lock = startup_lock or _threading.RLock()
self.semaphore = semaphore or Semaphore(limit)
self.timer = timer or RecurrentTimer(1.0, self.semaphore.clear)
self.session = session or session_class()
_atexit.register(self.__del__)
def __del__(self):
self.timer.cancel()
if self.timer.ident is not None: # timer was started
self.timer.join()
def returnTicker(self):
"""Returns the ticker for all markets."""
return self._public('returnTicker')
def return24hVolume(self):
"""Returns the 24-hour volume for all markets, plus totals for
primary currencies."""
return self._public('return24hVolume')
def returnOrderBook(self, currencyPair='all', depth='50'):
"""Returns the order book for a given market, as well as a sequence
number for use with the Push API and an indicator specifying whether
the market is frozen. You may set currencyPair to "all" to get the
order books of all markets."""
return self._public('returnOrderBook', currencyPair=currencyPair,
depth=depth)
def returnTradeHistory(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return self._public('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end)
def returnChartData(self, currencyPair, period, start=0, end=2**32-1):
"""Returns candlestick chart data. Required GET parameters are
"currencyPair", "period" (candlestick period in seconds; valid values
are 300, 900, 1800, 7200, 14400, and 86400), "start", and "end".
"Start" and "end" are given in UNIX timestamp format and used to
specify the date range for the data returned."""
return self._public('returnChartData', currencyPair=currencyPair,
period=period, start=start, end=end)
def returnCurrencies(self):
"""Returns information about currencies."""
return self._public('returnCurrencies')
def returnLoanOrders(self, currency):
"""Returns the list of loan offers and demands for a given currency,
specified by the "currency" GET parameter."""
return self._public('returnLoanOrders', currency=currency)
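Stripped to its essentials, _public() is a GET against the public endpoint with the command folded into the query string; a standalone sketch assuming the requests package (executing it performs a live HTTP request):

import requests

def public(command, **params):
    params['command'] = command
    return requests.get('https://poloniex.com/public', params=params)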
|
Aula13/poloniex
|
poloniex/poloniex.py
|
PoloniexPublic.returnOrderBook
|
python
|
def returnOrderBook(self, currencyPair='all', depth='50'):
return self._public('returnOrderBook', currencyPair=currencyPair,
depth=depth)
|
Returns the order book for a given market, as well as a sequence
number for use with the Push API and an indicator specifying whether
the market is frozen. You may set currencyPair to "all" to get the
order books of all markets.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L95-L101
| null |
class PoloniexPublic(object):
"""Client to connect to Poloniex public APIs"""
def __init__(self, public_url=_PUBLIC_URL, limit=6,
session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None):
"""Initialize Poloniex client."""
self._public_url = public_url
self.startup_lock = startup_lock or _threading.RLock()
self.semaphore = semaphore or Semaphore(limit)
self.timer = timer or RecurrentTimer(1.0, self.semaphore.clear)
self.session = session or session_class()
_atexit.register(self.__del__)
def __del__(self):
self.timer.cancel()
if self.timer.ident is not None: # timer was started
self.timer.join()
@_api_wrapper
def _public(self, command, **params):
"""Invoke the 'command' public API with optional params."""
params['command'] = command
response = self.session.get(self._public_url, params=params)
return response
def returnTicker(self):
"""Returns the ticker for all markets."""
return self._public('returnTicker')
def return24hVolume(self):
"""Returns the 24-hour volume for all markets, plus totals for
primary currencies."""
return self._public('return24hVolume')
def returnTradeHistory(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return self._public('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end)
def returnChartData(self, currencyPair, period, start=0, end=2**32-1):
"""Returns candlestick chart data. Required GET parameters are
"currencyPair", "period" (candlestick period in seconds; valid values
are 300, 900, 1800, 7200, 14400, and 86400), "start", and "end".
"Start" and "end" are given in UNIX timestamp format and used to
specify the date range for the data returned."""
return self._public('returnChartData', currencyPair=currencyPair,
period=period, start=start, end=end)
def returnCurrencies(self):
"""Returns information about currencies."""
return self._public('returnCurrencies')
def returnLoanOrders(self, currency):
"""Returns the list of loan offers and demands for a given currency,
specified by the "currency" GET parameter."""
return self._public('returnLoanOrders', currency=currency)
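A usage sketch for the method above (assumes a PoloniexPublic instance and performs a live request; the currency pair is illustrative):

client = PoloniexPublic()
book = client.returnOrderBook(currencyPair='BTC_ETH', depth='10')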
|
Aula13/poloniex
|
poloniex/poloniex.py
|
PoloniexPublic.returnTradeHistory
|
python
|
def returnTradeHistory(self, currencyPair, start=None, end=None):
return self._public('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end)
|
Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L103-L108
| null |
class PoloniexPublic(object):
"""Client to connect to Poloniex public APIs"""
def __init__(self, public_url=_PUBLIC_URL, limit=6,
session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None):
"""Initialize Poloniex client."""
self._public_url = public_url
self.startup_lock = startup_lock or _threading.RLock()
self.semaphore = semaphore or Semaphore(limit)
self.timer = timer or RecurrentTimer(1.0, self.semaphore.clear)
self.session = session or session_class()
_atexit.register(self.__del__)
def __del__(self):
self.timer.cancel()
if self.timer.ident is not None: # timer was started
self.timer.join()
@_api_wrapper
def _public(self, command, **params):
"""Invoke the 'command' public API with optional params."""
params['command'] = command
response = self.session.get(self._public_url, params=params)
return response
def returnTicker(self):
"""Returns the ticker for all markets."""
return self._public('returnTicker')
def return24hVolume(self):
"""Returns the 24-hour volume for all markets, plus totals for
primary currencies."""
return self._public('return24hVolume')
def returnOrderBook(self, currencyPair='all', depth='50'):
"""Returns the order book for a given market, as well as a sequence
number for use with the Push API and an indicator specifying whether
the market is frozen. You may set currencyPair to "all" to get the
order books of all markets."""
return self._public('returnOrderBook', currencyPair=currencyPair,
depth=depth)
def returnChartData(self, currencyPair, period, start=0, end=2**32-1):
"""Returns candlestick chart data. Required GET parameters are
"currencyPair", "period" (candlestick period in seconds; valid values
are 300, 900, 1800, 7200, 14400, and 86400), "start", and "end".
"Start" and "end" are given in UNIX timestamp format and used to
specify the date range for the data returned."""
return self._public('returnChartData', currencyPair=currencyPair,
period=period, start=start, end=end)
def returnCurrencies(self):
"""Returns information about currencies."""
return self._public('returnCurrencies')
def returnLoanOrders(self, currency):
"""Returns the list of loan offers and demands for a given currency,
specified by the "currency" GET parameter."""
return self._public('returnLoanOrders', currency=currency)
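A usage sketch for the timestamp range parameters (assumes a PoloniexPublic instance named client, as above):

import time

end = int(time.time())
start = end - 3600  # UNIX timestamps, as the docstring requires
trades = client.returnTradeHistory('BTC_ETH', start=start, end=end)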
|
Aula13/poloniex
|
poloniex/poloniex.py
|
PoloniexPublic.returnChartData
|
python
|
def returnChartData(self, currencyPair, period, start=0, end=2**32-1):
return self._public('returnChartData', currencyPair=currencyPair,
period=period, start=start, end=end)
|
Returns candlestick chart data. Required GET parameters are
"currencyPair", "period" (candlestick period in seconds; valid values
are 300, 900, 1800, 7200, 14400, and 86400), "start", and "end".
"Start" and "end" are given in UNIX timestamp format and used to
specify the date range for the data returned.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L110-L117
| null |
class PoloniexPublic(object):
"""Client to connect to Poloniex public APIs"""
def __init__(self, public_url=_PUBLIC_URL, limit=6,
session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None):
"""Initialize Poloniex client."""
self._public_url = public_url
self.startup_lock = startup_lock or _threading.RLock()
self.semaphore = semaphore or Semaphore(limit)
self.timer = timer or RecurrentTimer(1.0, self.semaphore.clear)
self.session = session or session_class()
_atexit.register(self.__del__)
def __del__(self):
self.timer.cancel()
if self.timer.ident is not None: # timer was started
self.timer.join()
@_api_wrapper
def _public(self, command, **params):
"""Invoke the 'command' public API with optional params."""
params['command'] = command
response = self.session.get(self._public_url, params=params)
return response
def returnTicker(self):
"""Returns the ticker for all markets."""
return self._public('returnTicker')
def return24hVolume(self):
"""Returns the 24-hour volume for all markets, plus totals for
primary currencies."""
return self._public('return24hVolume')
def returnOrderBook(self, currencyPair='all', depth='50'):
"""Returns the order book for a given market, as well as a sequence
number for use with the Push API and an indicator specifying whether
the market is frozen. You may set currencyPair to "all" to get the
order books of all markets."""
return self._public('returnOrderBook', currencyPair=currencyPair,
depth=depth)
def returnTradeHistory(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return self._public('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end)
def returnCurrencies(self):
"""Returns information about currencies."""
return self._public('returnCurrencies')
def returnLoanOrders(self, currency):
"""Returns the list of loan offers and demands for a given currency,
specified by the "currency" GET parameter."""
return self._public('returnLoanOrders', currency=currency)
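A usage sketch requesting daily candles, using one of the valid period values from the docstring (assumes a PoloniexPublic instance named client):

import time

now = int(time.time())
candles = client.returnChartData('BTC_ETH', period=86400,
                                 start=now - 7 * 86400, end=now)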
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex._private
|
python
|
def _private(self, command, **params):
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
|
Invoke the 'command' private (trading) API with optional params.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L169-L179
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
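A distilled sketch of the authentication in _PoloniexAuth: HMAC-SHA512 of the url-encoded POST body, keyed with the API secret, attached next to the API key in the headers. The key placeholder is hypothetical:

import hashlib
import hmac
from urllib.parse import urlencode

def sign_headers(secret, params):
    body = urlencode(params)
    sig = hmac.new(secret.encode('utf-8'), body.encode('utf-8'),
                   hashlib.sha512).hexdigest()
    return {'Key': 'YOUR_API_KEY', 'Sign': sig}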
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.returnDepositsWithdrawals
|
python
|
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
return self._private('returnDepositsWithdrawals', start=start, end=end)
|
Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L203-L207
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.returnTradeHistory
|
python
|
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
|
Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day.
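A hedged sketch of both call styles (placeholder credentials; per the docstring, omitting the range limits results to one day):

import time
from poloniex.poloniex import Poloniex  # import path assumed from func_path_in_repository

client = Poloniex(apikey='YOUR_API_KEY', secret='YOUR_SECRET')  # placeholder credentials
# All markets, default one-day window, at most 500 rows (the default limit).
recent = client.returnTradeHistory()
# One market over an explicit one-week range, capped at 100 rows.
end = int(time.time())
trades = client.returnTradeHistory('BTC_ETH', start=end - 7 * 86400, end=end, limit=100)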
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L227-L235
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.returnTradeHistoryPublic
|
python
|
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
|
Returns the past 200 trades for a given market, or up to 50,000
trades within a range specified by the "start" and "end" GET
parameters, given as UNIX timestamps.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L237-L241
|
[
"def returnTradeHistory(self, currencyPair, start=None, end=None):\n \"\"\"Returns the past 200 trades for a given market, or up to 50,000\n trades between a range specified in UNIX timestamps by the \"start\"\n and \"end\" GET parameters.\"\"\"\n return self._public('returnTradeHistory', currencyPair=currencyPair,\n start=start, end=end)\n"
] |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.buy
|
python
|
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
|
Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills.
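A hedged sketch (placeholder credentials and prices; the 'orderNumber' response key is an assumption based on the docstring's note that a successful call returns the order number):

from poloniex.poloniex import Poloniex  # import path assumed from func_path_in_repository

client = Poloniex(apikey='YOUR_API_KEY', secret='YOUR_SECRET')  # placeholder credentials
# Post-only limit buy: rests on the book or is rejected outright, never takes.
order = client.buy('BTC_ETH', rate=0.031, amount=1.5, postOnly=1)
print(order['orderNumber'])  # key name assumed from the Poloniex API response format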
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L250-L266
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.moveOrder
|
python
|
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
|
Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders.
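A hedged sketch that reprices a resting order (the 'orderNumber' response key is again an assumption carried over from the buy docstring):

from poloniex.poloniex import Poloniex  # import path assumed from func_path_in_repository

client = Poloniex(apikey='YOUR_API_KEY', secret='YOUR_SECRET')  # placeholder credentials
order = client.buy('BTC_ETH', rate=0.031, amount=1.5)
# Reprice the resting order atomically; amount is optional and unchanged here.
moved = client.moveOrder(orderNumber=order['orderNumber'], rate=0.032)
# Reprice and resize in a single atomic call.
resized = client.moveOrder(order['orderNumber'], rate=0.032, amount=2.0)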
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L282-L292
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades between a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
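Usage note (hedged sketch): the buy() docstring in the scope above explains the fillOrKill / immediateOrCancel / postOnly flags. A minimal illustration, assuming the package exposes the private client as poloniex.Poloniex; credentials and market pair are placeholders:
# Post-only limit buy: rests on the book or is rejected outright, so no
# part of the fill ever pays the taker fee (see the buy() docstring above).
from poloniex import Poloniex  # assumed import path

client = Poloniex(apikey='YOUR_KEY', secret='YOUR_SECRET')
resp = client.buy('BTC_ETH', rate='0.05', amount='1.0', postOnly=1)
print(resp)  # on success, contains the order number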
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.withdraw
|
python
|
def withdraw(self, currency, amount, address, paymentId=None):
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
|
Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId".
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L294-L301
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades within a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
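Usage note (hedged sketch) for Poloniex.withdraw, the focal function of the record above. Credentials, currency, amount, and address are placeholders, and the withdrawal privilege must be enabled on the API key:
from poloniex import Poloniex  # assumed import path

client = Poloniex(apikey='YOUR_KEY', secret='YOUR_SECRET')
# No email confirmation is sent; for XMR you may also pass paymentId='...'
resp = client.withdraw(currency='BTC', amount='0.01',
                       address='1ExamplePlaceholderAddress')
print(resp)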
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.transferBalance
|
python
|
def transferBalance(self, currency, amount, fromAccount, toAccount):
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
|
Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount".
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L322-L328
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades within a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
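Usage note (hedged sketch) for Poloniex.transferBalance, the focal function of the record above: moving funds from the exchange account to the margin account. All values are placeholders:
from poloniex import Poloniex  # assumed import path

client = Poloniex(apikey='YOUR_KEY', secret='YOUR_SECRET')
resp = client.transferBalance(currency='BTC', amount='0.5',
                              fromAccount='exchange', toAccount='margin')
print(resp)  # API success/error message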
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.marginBuy
|
python
|
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
|
Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L336-L343
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades within a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
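Usage note (hedged sketch) for Poloniex.marginBuy, the focal function of the record above, with a capped lending rate and a follow-up position check via getMarginPosition. Pair and numbers are placeholders:
from poloniex import Poloniex  # assumed import path

client = Poloniex(apikey='YOUR_KEY', secret='YOUR_SECRET')
resp = client.marginBuy('BTC_XMR', rate='0.01', amount='10',
                        lendingRate='0.005')  # refuse loans costlier than 0.5%
print(resp)  # order number plus any trades immediately resulting
pos = client.getMarginPosition('BTC_XMR')
print(pos)   # 'type' is 'none' if no position is open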
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.marginSell
|
python
|
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
|
Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L345-L349
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
trades within a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
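Protocol note (hedged sketch): every _private call in the scopes above is authenticated by _PoloniexAuth, which signs the form-encoded POST body with HMAC-SHA512 under the API secret and sends the hex digest in the "Sign" header alongside the "Key" header. A standalone reproduction of that signature, assuming the body is the urlencoded params dict that requests would transmit:
import hashlib
import hmac
from urllib.parse import urlencode

secret = 'YOUR_SECRET'  # placeholder
params = {'command': 'returnBalances', 'nonce': 1514764800000}
body = urlencode(params)  # must byte-match the body requests sends

sign = hmac.new(secret.encode('utf-8'), body.encode('utf-8'),
                hashlib.sha512).hexdigest()
headers = {'Key': 'YOUR_KEY', 'Sign': sign}  # placeholder key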
|
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.createLoanOffer
|
python
|
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
|
Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate".
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L369-L376
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
        trades within a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
"""Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned. """
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
|
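A note on the nonce handling in the Poloniex client above: __init__ seeds an itertools.count from the current time in milliseconds and guards it with an RLock. A minimal standalone sketch of the same scheme (next_nonce is a hypothetical helper, not part of the client):
import itertools
import threading
import time

nonce_lock = threading.RLock()
nonce_iter = itertools.count(int(time.time() * 1000))

def next_nonce():
    # Serialize access so concurrent callers never observe the same value.
    with nonce_lock:
        return next(nonce_iter)

assert next_nonce() < next_nonce()  # strictly increasing across calls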
Aula13/poloniex
|
poloniex/poloniex.py
|
Poloniex.returnLendingHistory
|
python
|
def returnLendingHistory(self, start=0, end=2**32-1, limit=None):
return self._private('returnLendingHistory', start=start, end=end,
limit=limit)
|
Returns your lending history within a time range specified by the
"start" and "end" POST parameters as UNIX timestamps. "limit" may also
be specified to limit the number of rows returned.
|
train
|
https://github.com/Aula13/poloniex/blob/a5bfc91e766e220bf77f5e3a1b131f095913e714/poloniex/poloniex.py#L391-L396
| null |
class Poloniex(PoloniexPublic):
"""Client to connect to Poloniex private APIs."""
class _PoloniexAuth(_requests.auth.AuthBase):
"""Poloniex Request Authentication."""
def __init__(self, apikey, secret):
self._apikey, self._secret = apikey, secret
def __call__(self, request):
signature = _hmac.new(
str.encode(self._secret, 'utf-8'),
str.encode(request.body, 'utf-8'),
_hashlib.sha512
)
request.headers.update({"Key": self._apikey,
"Sign": signature.hexdigest()})
return request
def __init__(self, apikey=None, secret=None,
public_url=_PUBLIC_URL,
private_url=_PRIVATE_URL,
limit=6, session_class=_requests.Session,
session=None, startup_lock=None,
semaphore=None, timer=None,
nonce_iter=None, nonce_lock=None):
"""Initialize the Poloniex private client."""
super(Poloniex, self).__init__(public_url, limit,
session_class,
session, startup_lock,
semaphore, timer)
self._private_url = private_url
self._apikey = apikey
self._secret = secret
self.nonce_lock = nonce_lock or _threading.RLock()
self.nonce_iter = nonce_iter or _itertools.count(int(_time.time() * 1000))
@_api_wrapper
def _private(self, command, **params):
"""Invoke the 'command' public API with optional params."""
if not self._apikey or not self._secret:
raise PoloniexCredentialsException('missing apikey/secret')
with self.nonce_lock:
params.update({'command': command, 'nonce': next(self.nonce_iter)})
response = self.session.post(
self._private_url, data=params,
auth=Poloniex._PoloniexAuth(self._apikey, self._secret))
return response
def returnBalances(self):
"""Returns all of your available balances."""
return self._private('returnBalances')
def returnCompleteBalances(self, account=None):
"""Returns all of your balances, including available balance, balance
on orders, and the estimated BTC value of your balance. By default,
this call is limited to your exchange account; set the "account" POST
parameter to "all" to include your margin and lending accounts."""
return self._private('returnCompleteBalances', account=account)
def returnDepositAddresses(self):
"""Returns all of your deposit addresses."""
return self._private('returnDepositAddresses')
def generateNewAddress(self, currency):
"""Generates a new deposit address for the currency specified by the
"currency" POST parameter. Only one address per currency per day may be
generated, and a new address may not be generated before the
previously-generated one has been used."""
return self._private('generateNewAddress', currency=currency)
def returnDepositsWithdrawals(self, start=0, end=2**32-1):
"""Returns your deposit and withdrawal history within a range,
specified by the "start" and "end" POST parameters, both of which
should be given as UNIX timestamps."""
return self._private('returnDepositsWithdrawals', start=start, end=end)
def returnDeposits(self, start=0, end=2**32-1):
"""Returns your deposit history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['deposits']
def returnWithdrawals(self, start=0, end=2**32-1):
"""Returns your withdrawal history within a range, specified by the
"start" and "end" POST parameters, both of which should be given as
UNIX timestamps."""
return self.returnDepositsWithdrawals(start, end)['withdrawals']
def returnOpenOrders(self, currencyPair='all'):
"""Returns your open orders for a given market, specified by the
"currencyPair" POST parameter, e.g. "BTC_XCP". Set "currencyPair" to
"all" to return open orders for all markets."""
return self._private('returnOpenOrders', currencyPair=currencyPair)
def returnTradeHistory(self, currencyPair='all', start=None, end=None, limit=500):
"""Returns your trade history for a given market, specified by the
"currencyPair" POST parameter. You may specify "all" as the
currencyPair to receive your trade history for all markets. You may
optionally specify a range via "start" and/or "end" POST parameters,
given in UNIX timestamp format; if you do not specify a range, it will
be limited to one day."""
return self._private('returnTradeHistory', currencyPair=currencyPair,
start=start, end=end, limit=limit)
def returnTradeHistoryPublic(self, currencyPair, start=None, end=None):
"""Returns the past 200 trades for a given market, or up to 50,000
        trades within a range specified in UNIX timestamps by the "start"
and "end" GET parameters."""
return super(Poloniex, self).returnTradeHistory(currencyPair, start, end)
def returnOrderTrades(self, orderNumber):
"""Returns all trades involving a given order, specified by the
"orderNumber" POST parameter. If no trades for the order have occurred
or you specify an order that does not belong to you, you will receive
an error. """
return self._private('returnOrderTrades', orderNumber=orderNumber)
def buy(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a limit buy order in a given market. Required POST parameters
are "currencyPair", "rate", and "amount". If successful, the method
will return the order number.
You may optionally set "fillOrKill", "immediateOrCancel", "postOnly"
to 1. A fill-or-kill order will either fill in its entirety or be
completely aborted. An immediate-or-cancel order can be partially or
completely filled, but any portion of the order that cannot be filled
immediately will be canceled rather than left on the order book.
A post-only order will only be placed if no portion of it fills
immediately; this guarantees you will never pay the taker fee on any
part of the order that fills."""
return self._private('buy', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def sell(self, currencyPair, rate, amount, fillOrKill=None,
immediateOrCancel=None, postOnly=None):
"""Places a sell order in a given market. Parameters and output are
the same as for the buy method."""
return self._private('sell', currencyPair=currencyPair, rate=rate,
amount=amount, fillOrKill=fillOrKill,
immediateOrCancel=immediateOrCancel,
postOnly=postOnly)
def cancelOrder(self, orderNumber):
"""Cancels an order you have placed in a given market. Required POST
parameter is "orderNumber"."""
return self._private('cancelOrder', orderNumber=orderNumber)
def moveOrder(self, orderNumber, rate, amount=None, postOnly=None,
immediateOrCancel=None):
"""Cancels an order and places a new one of the same type in a single
atomic transaction, meaning either both operations will succeed or both
will fail. Required POST parameters are "orderNumber" and "rate"; you
may optionally specify "amount" if you wish to change the amount of
the new order. "postOnly" or "immediateOrCancel" may be specified for
exchange orders, but will have no effect on margin orders. """
return self._private('moveOrder', orderNumber=orderNumber, rate=rate,
amount=amount, postOnly=postOnly,
immediateOrCancel=immediateOrCancel)
def withdraw(self, currency, amount, address, paymentId=None):
"""Immediately places a withdrawal for a given currency, with no email
confirmation. In order to use this method, the withdrawal privilege
must be enabled for your API key. Required POST parameters are
"currency", "amount", and "address". For XMR withdrawals, you may
optionally specify "paymentId"."""
return self._private('withdraw', currency=currency, amount=amount,
address=address, paymentId=paymentId)
def returnFeeInfo(self):
"""If you are enrolled in the maker-taker fee schedule, returns your
current trading fees and trailing 30-day volume in BTC. This
information is updated once every 24 hours."""
return self._private('returnFeeInfo')
def returnAvailableAccountBalances(self, account=None):
"""Returns your balances sorted by account. You may optionally specify
the "account" POST parameter if you wish to fetch only the balances of
one account. Please note that balances in your margin account may not
be accessible if you have any open margin positions or orders."""
return self._private('returnAvailableAccountBalances', account=account)
def returnTradableBalances(self):
"""Returns your current tradable balances for each currency in each
market for which margin trading is enabled. Please note that these
balances may vary continually with market conditions."""
return self._private('returnTradableBalances')
def transferBalance(self, currency, amount, fromAccount, toAccount):
"""Transfers funds from one account to another (e.g. from your exchange
account to your margin account). Required POST parameters are
"currency", "amount", "fromAccount", and "toAccount"."""
return self._private('transferBalance', currency=currency,
amount=amount, fromAccount=fromAccount,
toAccount=toAccount)
def returnMarginAccountSummary(self):
"""Returns a summary of your entire margin account. This is the same
information you will find in the Margin Account section of the Margin
Trading page, under the Markets list. """
return self._private('returnMarginAccountSummary')
def marginBuy(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin buy order in a given market. Required POST
parameters are "currencyPair", "rate", and "amount". You may optionally
specify a maximum lending rate using the "lendingRate" parameter.
If successful, the method will return the order number and any trades
immediately resulting from your order."""
return self._private('marginBuy', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def marginSell(self, currencyPair, rate, amount, lendingRate=None):
"""Places a margin sell order in a given market. Parameters and output
are the same as for the marginBuy method."""
return self._private('marginSell', currencyPair=currencyPair, rate=rate,
amount=amount, lendingRate=lendingRate)
def getMarginPosition(self, currencyPair):
"""Returns information about your margin position in a given market,
specified by the "currencyPair" POST parameter. You may set
"currencyPair" to "all" if you wish to fetch all of your margin
positions at once. If you have no margin position in the specified
market, "type" will be set to "none". "liquidationPrice" is an
estimate, and does not necessarily represent the price at which an
actual forced liquidation will occur. If you have no liquidation
price, the value will be -1. """
return self._private('getMarginPosition', currencyPair=currencyPair)
def closeMarginPosition(self, currencyPair):
"""Closes your margin position in a given market (specified by the
"currencyPair" POST parameter) using a market order. This call will
also return success if you do not have an open position in the
specified market."""
return self._private('closeMarginPosition', currencyPair=currencyPair)
def createLoanOffer(self, currency, amount, duration, autoRenew,
lendingRate):
"""Creates a loan offer for a given currency. Required POST parameters
are "currency", "amount", "duration", "autoRenew" (0 or 1), and
"lendingRate". """
return self._private('createLoanOffer', currency=currency,
amount=amount, duration=duration,
autoRenew=autoRenew, lendingRate=lendingRate)
def cancelLoanOffer(self, orderNumber):
"""Cancels a loan offer specified by the "orderNumber" POST
parameter."""
return self._private('cancelLoanOffer', orderNumber=orderNumber)
def returnOpenLoanOffers(self):
"""Returns your open loan offers for each currency. """
return self._private('returnOpenLoanOffers')
def returnActiveLoans(self):
"""Returns your active loans for each currency."""
return self._private('returnActiveLoans')
def toggleAutoRenew(self, orderNumber):
"""Toggles the autoRenew setting on an active loan, specified by the
"orderNumber" POST parameter. If successful, "message" will indicate
the new autoRenew setting. """
return self._private('toggleAutoRenew', orderNumber=orderNumber)
|
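The _PoloniexAuth hook in the client above signs the urlencoded POST body with HMAC-SHA512 and sends the API key and hex digest in the Key/Sign headers. A self-contained sketch of that computation, assuming placeholder credentials (API_KEY and API_SECRET are not real values):
import hashlib
import hmac
from urllib.parse import urlencode

apikey, secret = 'API_KEY', 'API_SECRET'   # hypothetical credentials
params = {'command': 'returnBalances', 'nonce': 1700000000000}
body = urlencode(params)                   # the POST body requests would send

signature = hmac.new(secret.encode('utf-8'),
                     body.encode('utf-8'),
                     hashlib.sha512).hexdigest()
headers = {'Key': apikey, 'Sign': signature}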
jvarho/pylibscrypt
|
pylibscrypt/pypyscrypt_inline.py
|
salsa20_8
|
python
|
def salsa20_8(B, x, src, s_start, dest, d_start):
# Merged blockxor for speed
for i in xrange(16):
x[i] = B[i] = B[i] ^ src[s_start + i]
# This is the actual Salsa 20/8: four identical double rounds
for i in xrange(4):
a = (x[0]+x[12]) & 0xffffffff
b = (x[5]+x[1]) & 0xffffffff
x[4] ^= (a << 7) | (a >> 25)
x[9] ^= (b << 7) | (b >> 25)
a = (x[10]+x[6]) & 0xffffffff
b = (x[15]+x[11]) & 0xffffffff
x[14] ^= (a << 7) | (a >> 25)
x[3] ^= (b << 7) | (b >> 25)
a = (x[4]+x[0]) & 0xffffffff
b = (x[9]+x[5]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[14]+x[10]) & 0xffffffff
b = (x[3]+x[15]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[8]+x[4]) & 0xffffffff
b = (x[13]+x[9]) & 0xffffffff
x[12] ^= (a << 13) | (a >> 19)
x[1] ^= (b << 13) | (b >> 19)
a = (x[2]+x[14]) & 0xffffffff
b = (x[7]+x[3]) & 0xffffffff
x[6] ^= (a << 13) | (a >> 19)
x[11] ^= (b << 13) | (b >> 19)
a = (x[12]+x[8]) & 0xffffffff
b = (x[1]+x[13]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[6]+x[2]) & 0xffffffff
b = (x[11]+x[7]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
a = (x[0]+x[3]) & 0xffffffff
b = (x[5]+x[4]) & 0xffffffff
x[1] ^= (a << 7) | (a >> 25)
x[6] ^= (b << 7) | (b >> 25)
a = (x[10]+x[9]) & 0xffffffff
b = (x[15]+x[14]) & 0xffffffff
x[11] ^= (a << 7) | (a >> 25)
x[12] ^= (b << 7) | (b >> 25)
a = (x[1]+x[0]) & 0xffffffff
b = (x[6]+x[5]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[11]+x[10]) & 0xffffffff
b = (x[12]+x[15]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[2]+x[1]) & 0xffffffff
b = (x[7]+x[6]) & 0xffffffff
x[3] ^= (a << 13) | (a >> 19)
x[4] ^= (b << 13) | (b >> 19)
a = (x[8]+x[11]) & 0xffffffff
b = (x[13]+x[12]) & 0xffffffff
x[9] ^= (a << 13) | (a >> 19)
x[14] ^= (b << 13) | (b >> 19)
a = (x[3]+x[2]) & 0xffffffff
b = (x[4]+x[7]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[9]+x[8]) & 0xffffffff
b = (x[14]+x[13]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
# While we are handling the data, write it to the correct dest.
# The latter half is still part of salsa20
for i in xrange(16):
dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
|
Salsa20/8 http://en.wikipedia.org/wiki/Salsa20
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pypyscrypt_inline.py#L58-L135
| null |
# Automatically generated file, see inline.py
# Copyright (c) 2014 Richard Moore
# Copyright (c) 2014-2019 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Python implementation of Scrypt password-based key derivation function"""
# Scrypt definition:
# http://www.tarsnap.com/scrypt/scrypt.pdf
# It was originally written for a pure-Python Litecoin CPU miner:
# https://github.com/ricmoo/nightminer
# Imported to this project from:
# https://github.com/ricmoo/pyscrypt
# And owes thanks to:
# https://github.com/wg/scrypt
from hashlib import pbkdf2_hmac as _pbkdf2
import struct
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, xrange,
check_args)
def blockxor(source, s_start, dest, d_start, length):
for i in xrange(length):
dest[d_start + i] ^= source[s_start + i]
def integerify(B, r):
"""A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1"""
Bi = (2 * r - 1) * 16
return B[Bi]
def blockmix_salsa8(BY, Yi, r):
"""Blockmix; Used by SMix"""
start = (2 * r - 1) * 16
X = BY[start:start+16] # BlockMix - 1
tmp = [0]*16
for i in xrange(2 * r): # BlockMix - 2
#blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)
salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)
#array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4
for i in xrange(r): # BlockMix - 6
BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]
BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
def smix(B, Bi, r, N, V, X):
"""SMix; a specific case of ROMix based on Salsa20/8"""
X[0:(0)+(32 * r)] = B[Bi:(Bi)+(32 * r)]
for i in xrange(N): # ROMix - 2
V[i * (32 * r):(i * (32 * r))+(32 * r)] = X[0:(0)+(32 * r)]
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(N): # ROMix - 6
j = integerify(X, r) & (N - 1) # ROMix - 7
blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
B[Bi:(Bi)+(32 * r)] = X[0:(0)+(32 * r)]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100 ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
# Everything is lists of 32-bit uints for all but pbkdf2
try:
B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
B = list(struct.unpack('<%dI' % (len(B) // 4), B))
XY = [0] * (64 * r)
V = [0] * (32 * r * N)
except (MemoryError, OverflowError):
raise ValueError("scrypt parameters don't fit in memory")
for i in xrange(p):
smix(B, i * 32 * r, r, N, V, XY)
B = struct.pack('<%dI' % len(B), *B)
return _pbkdf2('sha256', password, B, 1, olen)
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
__all__ = ['scrypt', 'scrypt_mcf', 'scrypt_mcf_check']
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
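The rotation pattern inlined throughout salsa20_8 above, (a << b) | (a >> (32 - b)) applied to a value already masked to 32 bits, is a 32-bit rotate-left once the result is masked (salsa20_8 defers that mask to the final (x[i] + B[i]) & 0xffffffff step). A quick author-added check of the identity:
def rotl32(a, b):
    # 32-bit rotate-left; the inlined pattern plus the deferred mask
    a &= 0xffffffff
    return ((a << b) | (a >> (32 - b))) & 0xffffffff

assert rotl32(1, 1) == 2
assert rotl32(0x80000000, 1) == 1            # the top bit wraps to bit 0
assert rotl32(0x80000001, 7) == 0xc0         # bit 31 -> bit 6, bit 0 -> bit 7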
jvarho/pylibscrypt
|
pylibscrypt/pypyscrypt_inline.py
|
blockmix_salsa8
|
python
|
def blockmix_salsa8(BY, Yi, r):
start = (2 * r - 1) * 16
X = BY[start:start+16] # BlockMix - 1
tmp = [0]*16
for i in xrange(2 * r): # BlockMix - 2
#blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)
salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)
#array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4
for i in xrange(r): # BlockMix - 6
BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]
BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
|
Blockmix; used by SMix
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pypyscrypt_inline.py#L138-L152
|
[
"def salsa20_8(B, x, src, s_start, dest, d_start):\n \"\"\"Salsa20/8 http://en.wikipedia.org/wiki/Salsa20\"\"\"\n\n # Merged blockxor for speed\n for i in xrange(16):\n x[i] = B[i] = B[i] ^ src[s_start + i]\n\n # This is the actual Salsa 20/8: four identical double rounds\n for i in xrange(4):\n a = (x[0]+x[12]) & 0xffffffff\n b = (x[5]+x[1]) & 0xffffffff\n x[4] ^= (a << 7) | (a >> 25)\n x[9] ^= (b << 7) | (b >> 25)\n a = (x[10]+x[6]) & 0xffffffff\n b = (x[15]+x[11]) & 0xffffffff\n x[14] ^= (a << 7) | (a >> 25)\n x[3] ^= (b << 7) | (b >> 25)\n a = (x[4]+x[0]) & 0xffffffff\n b = (x[9]+x[5]) & 0xffffffff\n x[8] ^= (a << 9) | (a >> 23)\n x[13] ^= (b << 9) | (b >> 23)\n a = (x[14]+x[10]) & 0xffffffff\n b = (x[3]+x[15]) & 0xffffffff\n x[2] ^= (a << 9) | (a >> 23)\n x[7] ^= (b << 9) | (b >> 23)\n a = (x[8]+x[4]) & 0xffffffff\n b = (x[13]+x[9]) & 0xffffffff\n x[12] ^= (a << 13) | (a >> 19)\n x[1] ^= (b << 13) | (b >> 19)\n a = (x[2]+x[14]) & 0xffffffff\n b = (x[7]+x[3]) & 0xffffffff\n x[6] ^= (a << 13) | (a >> 19)\n x[11] ^= (b << 13) | (b >> 19)\n a = (x[12]+x[8]) & 0xffffffff\n b = (x[1]+x[13]) & 0xffffffff\n x[0] ^= (a << 18) | (a >> 14)\n x[5] ^= (b << 18) | (b >> 14)\n a = (x[6]+x[2]) & 0xffffffff\n b = (x[11]+x[7]) & 0xffffffff\n x[10] ^= (a << 18) | (a >> 14)\n x[15] ^= (b << 18) | (b >> 14)\n a = (x[0]+x[3]) & 0xffffffff\n b = (x[5]+x[4]) & 0xffffffff\n x[1] ^= (a << 7) | (a >> 25)\n x[6] ^= (b << 7) | (b >> 25)\n a = (x[10]+x[9]) & 0xffffffff\n b = (x[15]+x[14]) & 0xffffffff\n x[11] ^= (a << 7) | (a >> 25)\n x[12] ^= (b << 7) | (b >> 25)\n a = (x[1]+x[0]) & 0xffffffff\n b = (x[6]+x[5]) & 0xffffffff\n x[2] ^= (a << 9) | (a >> 23)\n x[7] ^= (b << 9) | (b >> 23)\n a = (x[11]+x[10]) & 0xffffffff\n b = (x[12]+x[15]) & 0xffffffff\n x[8] ^= (a << 9) | (a >> 23)\n x[13] ^= (b << 9) | (b >> 23)\n a = (x[2]+x[1]) & 0xffffffff\n b = (x[7]+x[6]) & 0xffffffff\n x[3] ^= (a << 13) | (a >> 19)\n x[4] ^= (b << 13) | (b >> 19)\n a = (x[8]+x[11]) & 0xffffffff\n b = (x[13]+x[12]) & 0xffffffff\n x[9] ^= (a << 13) | (a >> 19)\n x[14] ^= (b << 13) | (b >> 19)\n a = (x[3]+x[2]) & 0xffffffff\n b = (x[4]+x[7]) & 0xffffffff\n x[0] ^= (a << 18) | (a >> 14)\n x[5] ^= (b << 18) | (b >> 14)\n a = (x[9]+x[8]) & 0xffffffff\n b = (x[14]+x[13]) & 0xffffffff\n x[10] ^= (a << 18) | (a >> 14)\n x[15] ^= (b << 18) | (b >> 14)\n\n # While we are handling the data, write it to the correct dest.\n # The latter half is still part of salsa20\n for i in xrange(16):\n dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff\n"
] |
# Automatically generated file, see inline.py
# Copyright (c) 2014 Richard Moore
# Copyright (c) 2014-2019 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Python implementation of Scrypt password-based key derivation function"""
# Scrypt definition:
# http://www.tarsnap.com/scrypt/scrypt.pdf
# It was originally written for a pure-Python Litecoin CPU miner:
# https://github.com/ricmoo/nightminer
# Imported to this project from:
# https://github.com/ricmoo/pyscrypt
# And owes thanks to:
# https://github.com/wg/scrypt
from hashlib import pbkdf2_hmac as _pbkdf2
import struct
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, xrange,
check_args)
def blockxor(source, s_start, dest, d_start, length):
for i in xrange(length):
dest[d_start + i] ^= source[s_start + i]
def integerify(B, r):
"""A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1"""
Bi = (2 * r - 1) * 16
return B[Bi]
def salsa20_8(B, x, src, s_start, dest, d_start):
"""Salsa20/8 http://en.wikipedia.org/wiki/Salsa20"""
# Merged blockxor for speed
for i in xrange(16):
x[i] = B[i] = B[i] ^ src[s_start + i]
# This is the actual Salsa 20/8: four identical double rounds
for i in xrange(4):
a = (x[0]+x[12]) & 0xffffffff
b = (x[5]+x[1]) & 0xffffffff
x[4] ^= (a << 7) | (a >> 25)
x[9] ^= (b << 7) | (b >> 25)
a = (x[10]+x[6]) & 0xffffffff
b = (x[15]+x[11]) & 0xffffffff
x[14] ^= (a << 7) | (a >> 25)
x[3] ^= (b << 7) | (b >> 25)
a = (x[4]+x[0]) & 0xffffffff
b = (x[9]+x[5]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[14]+x[10]) & 0xffffffff
b = (x[3]+x[15]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[8]+x[4]) & 0xffffffff
b = (x[13]+x[9]) & 0xffffffff
x[12] ^= (a << 13) | (a >> 19)
x[1] ^= (b << 13) | (b >> 19)
a = (x[2]+x[14]) & 0xffffffff
b = (x[7]+x[3]) & 0xffffffff
x[6] ^= (a << 13) | (a >> 19)
x[11] ^= (b << 13) | (b >> 19)
a = (x[12]+x[8]) & 0xffffffff
b = (x[1]+x[13]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[6]+x[2]) & 0xffffffff
b = (x[11]+x[7]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
a = (x[0]+x[3]) & 0xffffffff
b = (x[5]+x[4]) & 0xffffffff
x[1] ^= (a << 7) | (a >> 25)
x[6] ^= (b << 7) | (b >> 25)
a = (x[10]+x[9]) & 0xffffffff
b = (x[15]+x[14]) & 0xffffffff
x[11] ^= (a << 7) | (a >> 25)
x[12] ^= (b << 7) | (b >> 25)
a = (x[1]+x[0]) & 0xffffffff
b = (x[6]+x[5]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[11]+x[10]) & 0xffffffff
b = (x[12]+x[15]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[2]+x[1]) & 0xffffffff
b = (x[7]+x[6]) & 0xffffffff
x[3] ^= (a << 13) | (a >> 19)
x[4] ^= (b << 13) | (b >> 19)
a = (x[8]+x[11]) & 0xffffffff
b = (x[13]+x[12]) & 0xffffffff
x[9] ^= (a << 13) | (a >> 19)
x[14] ^= (b << 13) | (b >> 19)
a = (x[3]+x[2]) & 0xffffffff
b = (x[4]+x[7]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[9]+x[8]) & 0xffffffff
b = (x[14]+x[13]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
# While we are handling the data, write it to the correct dest.
# The latter half is still part of salsa20
for i in xrange(16):
dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
def smix(B, Bi, r, N, V, X):
"""SMix; a specific case of ROMix based on Salsa20/8"""
X[0:(0)+(32 * r)] = B[Bi:(Bi)+(32 * r)]
for i in xrange(N): # ROMix - 2
V[i * (32 * r):(i * (32 * r))+(32 * r)] = X[0:(0)+(32 * r)]
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(N): # ROMix - 6
j = integerify(X, r) & (N - 1) # ROMix - 7
blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
B[Bi:(Bi)+(32 * r)] = X[0:(0)+(32 * r)]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100 ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
# Everything is lists of 32-bit uints for all but pbkdf2
try:
B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
B = list(struct.unpack('<%dI' % (len(B) // 4), B))
XY = [0] * (64 * r)
V = [0] * (32 * r * N)
except (MemoryError, OverflowError):
raise ValueError("scrypt parameters don't fit in memory")
for i in xrange(p):
smix(B, i * 32 * r, r, N, V, XY)
B = struct.pack('<%dI' % len(B), *B)
return _pbkdf2('sha256', password, B, 1, olen)
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
__all__ = ['scrypt', 'scrypt_mcf', 'scrypt_mcf_check']
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
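A small author-added sketch of the buffer layout blockmix_salsa8 above expects, for r = 1: BY packs the 2*r input blocks of 16 words, followed by an equally sized scratch area starting at Yi.
r = 1
Yi = 32 * r                           # scratch area begins after the 2*r input blocks
BY = list(range(32 * r)) + [0] * (32 * r)
assert len(BY) == 64 * r              # 2*r input blocks + 2*r scratch blocks
# blockmix_salsa8(BY, Yi, r) writes Salsa20/8 output into the scratch half,
# then the 'BlockMix - 6' loop copies even-numbered scratch blocks to the
# front half of BY and odd-numbered ones to the back half.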
jvarho/pylibscrypt
|
pylibscrypt/pypyscrypt_inline.py
|
smix
|
python
|
def smix(B, Bi, r, N, V, X):
X[0:(0)+(32 * r)] = B[Bi:(Bi)+(32 * r)]
for i in xrange(N): # ROMix - 2
V[i * (32 * r):(i * (32 * r))+(32 * r)] = X[0:(0)+(32 * r)]
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(N): # ROMix - 6
j = integerify(X, r) & (N - 1) # ROMix - 7
blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
B[Bi:(Bi)+(32 * r)] = X[0:(0)+(32 * r)]
|
SMix; a specific case of ROMix based on Salsa20/8
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pypyscrypt_inline.py#L155-L169
|
[
"def blockxor(source, s_start, dest, d_start, length):\n for i in xrange(length):\n dest[d_start + i] ^= source[s_start + i]\n",
"def integerify(B, r):\n \"\"\"A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1\"\"\"\n\n Bi = (2 * r - 1) * 16\n return B[Bi]\n",
"def blockmix_salsa8(BY, Yi, r):\n \"\"\"Blockmix; Used by SMix\"\"\"\n\n start = (2 * r - 1) * 16\n X = BY[start:start+16] # BlockMix - 1\n tmp = [0]*16\n\n for i in xrange(2 * r): # BlockMix - 2\n #blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)\n salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)\n #array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4\n\n for i in xrange(r): # BlockMix - 6\n BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]\n BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]\n"
] |
# Automatically generated file, see inline.py
# Copyright (c) 2014 Richard Moore
# Copyright (c) 2014-2019 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Python implementation of Scrypt password-based key derivation function"""
# Scrypt definition:
# http://www.tarsnap.com/scrypt/scrypt.pdf
# It was originally written for a pure-Python Litecoin CPU miner:
# https://github.com/ricmoo/nightminer
# Imported to this project from:
# https://github.com/ricmoo/pyscrypt
# And owes thanks to:
# https://github.com/wg/scrypt
from hashlib import pbkdf2_hmac as _pbkdf2
import struct
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, xrange,
check_args)
def blockxor(source, s_start, dest, d_start, length):
for i in xrange(length):
dest[d_start + i] ^= source[s_start + i]
def integerify(B, r):
"""A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1"""
Bi = (2 * r - 1) * 16
return B[Bi]
def salsa20_8(B, x, src, s_start, dest, d_start):
"""Salsa20/8 http://en.wikipedia.org/wiki/Salsa20"""
# Merged blockxor for speed
for i in xrange(16):
x[i] = B[i] = B[i] ^ src[s_start + i]
# This is the actual Salsa 20/8: four identical double rounds
for i in xrange(4):
a = (x[0]+x[12]) & 0xffffffff
b = (x[5]+x[1]) & 0xffffffff
x[4] ^= (a << 7) | (a >> 25)
x[9] ^= (b << 7) | (b >> 25)
a = (x[10]+x[6]) & 0xffffffff
b = (x[15]+x[11]) & 0xffffffff
x[14] ^= (a << 7) | (a >> 25)
x[3] ^= (b << 7) | (b >> 25)
a = (x[4]+x[0]) & 0xffffffff
b = (x[9]+x[5]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[14]+x[10]) & 0xffffffff
b = (x[3]+x[15]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[8]+x[4]) & 0xffffffff
b = (x[13]+x[9]) & 0xffffffff
x[12] ^= (a << 13) | (a >> 19)
x[1] ^= (b << 13) | (b >> 19)
a = (x[2]+x[14]) & 0xffffffff
b = (x[7]+x[3]) & 0xffffffff
x[6] ^= (a << 13) | (a >> 19)
x[11] ^= (b << 13) | (b >> 19)
a = (x[12]+x[8]) & 0xffffffff
b = (x[1]+x[13]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[6]+x[2]) & 0xffffffff
b = (x[11]+x[7]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
a = (x[0]+x[3]) & 0xffffffff
b = (x[5]+x[4]) & 0xffffffff
x[1] ^= (a << 7) | (a >> 25)
x[6] ^= (b << 7) | (b >> 25)
a = (x[10]+x[9]) & 0xffffffff
b = (x[15]+x[14]) & 0xffffffff
x[11] ^= (a << 7) | (a >> 25)
x[12] ^= (b << 7) | (b >> 25)
a = (x[1]+x[0]) & 0xffffffff
b = (x[6]+x[5]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[11]+x[10]) & 0xffffffff
b = (x[12]+x[15]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[2]+x[1]) & 0xffffffff
b = (x[7]+x[6]) & 0xffffffff
x[3] ^= (a << 13) | (a >> 19)
x[4] ^= (b << 13) | (b >> 19)
a = (x[8]+x[11]) & 0xffffffff
b = (x[13]+x[12]) & 0xffffffff
x[9] ^= (a << 13) | (a >> 19)
x[14] ^= (b << 13) | (b >> 19)
a = (x[3]+x[2]) & 0xffffffff
b = (x[4]+x[7]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[9]+x[8]) & 0xffffffff
b = (x[14]+x[13]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
# While we are handling the data, write it to the correct dest.
# The latter half is still part of salsa20
for i in xrange(16):
dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
def blockmix_salsa8(BY, Yi, r):
"""Blockmix; Used by SMix"""
start = (2 * r - 1) * 16
X = BY[start:start+16] # BlockMix - 1
tmp = [0]*16
for i in xrange(2 * r): # BlockMix - 2
#blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)
salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)
#array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4
for i in xrange(r): # BlockMix - 6
BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]
BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100 ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
# Everything is lists of 32-bit uints for all but pbkdf2
try:
B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
B = list(struct.unpack('<%dI' % (len(B) // 4), B))
XY = [0] * (64 * r)
V = [0] * (32 * r * N)
except (MemoryError, OverflowError):
raise ValueError("scrypt parameters don't fit in memory")
for i in xrange(p):
smix(B, i * 32 * r, r, N, V, XY)
B = struct.pack('<%dI' % len(B), *B)
return _pbkdf2('sha256', password, B, 1, olen)
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
__all__ = ['scrypt', 'scrypt_mcf', 'scrypt_mcf_check']
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
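To make the 16 MiB figure in the scrypt() docstring concrete, some author-added arithmetic on the lookup table V that smix fills (32 * r * N 32-bit words):
N, r = 2**14, 8                # the module defaults
words = 32 * r * N
print(words)                   # 4194304 32-bit words
print(words * 4 / 2**20)       # 16.0 -> ~16 MiB of raw data; a CPython list
                               # of Python ints costs considerably more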
jvarho/pylibscrypt
|
pylibscrypt/pypyscrypt_inline.py
|
scrypt
|
python
|
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
check_args(password, salt, N, r, p, olen)
# Everything is lists of 32-bit uints for all but pbkdf2
try:
B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
B = list(struct.unpack('<%dI' % (len(B) // 4), B))
XY = [0] * (64 * r)
V = [0] * (32 * r * N)
except (MemoryError, OverflowError):
raise ValueError("scrypt parameters don't fit in memory")
for i in xrange(p):
smix(B, i * 32 * r, r, N, V, XY)
B = struct.pack('<%dI' % len(B), *B)
return _pbkdf2('sha256', password, B, 1, olen)
|
Returns a key derived using the scrypt key-derivation function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100 ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pypyscrypt_inline.py#L172-L207
|
[
"def check_args(password, salt, N, r, p, olen=64):\n if not isinstance(password, bytes):\n raise TypeError('password must be a byte string')\n if not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if not isinstance(N, numbers.Integral):\n raise TypeError('N must be an integer')\n if not isinstance(r, numbers.Integral):\n raise TypeError('r must be an integer')\n if not isinstance(p, numbers.Integral):\n raise TypeError('p must be an integer')\n if not isinstance(olen, numbers.Integral):\n raise TypeError('length must be an integer')\n if N > 2**63:\n raise ValueError('N cannot be larger than 2**63')\n if (N & (N - 1)) or N < 2:\n raise ValueError('N must be a power of two larger than 1')\n if r <= 0:\n raise ValueError('r must be positive')\n if p <= 0:\n raise ValueError('p must be positive')\n if r * p >= 2**30:\n raise ValueError('r * p must be less than 2 ** 30')\n if olen <= 0:\n raise ValueError('length must be positive')\n",
"def smix(B, Bi, r, N, V, X):\n \"\"\"SMix; a specific case of ROMix based on Salsa20/8\"\"\"\n\n X[0:(0)+(32 * r)] = B[Bi:(Bi)+(32 * r)]\n\n for i in xrange(N): # ROMix - 2\n V[i * (32 * r):(i * (32 * r))+(32 * r)] = X[0:(0)+(32 * r)]\n blockmix_salsa8(X, 32 * r, r) # ROMix - 4\n\n for i in xrange(N): # ROMix - 6\n j = integerify(X, r) & (N - 1) # ROMix - 7\n blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)\n blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)\n\n B[Bi:(Bi)+(32 * r)] = X[0:(0)+(32 * r)]\n"
] |
# Automatically generated file, see inline.py
# Copyright (c) 2014 Richard Moore
# Copyright (c) 2014-2019 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Python implementation of Scrypt password-based key derivation function"""
# Scrypt definition:
# http://www.tarsnap.com/scrypt/scrypt.pdf
# It was originally written for a pure-Python Litecoin CPU miner:
# https://github.com/ricmoo/nightminer
# Imported to this project from:
# https://github.com/ricmoo/pyscrypt
# And owes thanks to:
# https://github.com/wg/scrypt
from hashlib import pbkdf2_hmac as _pbkdf2
import struct
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, xrange,
check_args)
def blockxor(source, s_start, dest, d_start, length):
for i in xrange(length):
dest[d_start + i] ^= source[s_start + i]
def integerify(B, r):
"""A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1"""
Bi = (2 * r - 1) * 16
return B[Bi]
def salsa20_8(B, x, src, s_start, dest, d_start):
"""Salsa20/8 http://en.wikipedia.org/wiki/Salsa20"""
# Merged blockxor for speed
for i in xrange(16):
x[i] = B[i] = B[i] ^ src[s_start + i]
# This is the actual Salsa 20/8: four identical double rounds
for i in xrange(4):
a = (x[0]+x[12]) & 0xffffffff
b = (x[5]+x[1]) & 0xffffffff
x[4] ^= (a << 7) | (a >> 25)
x[9] ^= (b << 7) | (b >> 25)
a = (x[10]+x[6]) & 0xffffffff
b = (x[15]+x[11]) & 0xffffffff
x[14] ^= (a << 7) | (a >> 25)
x[3] ^= (b << 7) | (b >> 25)
a = (x[4]+x[0]) & 0xffffffff
b = (x[9]+x[5]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[14]+x[10]) & 0xffffffff
b = (x[3]+x[15]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[8]+x[4]) & 0xffffffff
b = (x[13]+x[9]) & 0xffffffff
x[12] ^= (a << 13) | (a >> 19)
x[1] ^= (b << 13) | (b >> 19)
a = (x[2]+x[14]) & 0xffffffff
b = (x[7]+x[3]) & 0xffffffff
x[6] ^= (a << 13) | (a >> 19)
x[11] ^= (b << 13) | (b >> 19)
a = (x[12]+x[8]) & 0xffffffff
b = (x[1]+x[13]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[6]+x[2]) & 0xffffffff
b = (x[11]+x[7]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
a = (x[0]+x[3]) & 0xffffffff
b = (x[5]+x[4]) & 0xffffffff
x[1] ^= (a << 7) | (a >> 25)
x[6] ^= (b << 7) | (b >> 25)
a = (x[10]+x[9]) & 0xffffffff
b = (x[15]+x[14]) & 0xffffffff
x[11] ^= (a << 7) | (a >> 25)
x[12] ^= (b << 7) | (b >> 25)
a = (x[1]+x[0]) & 0xffffffff
b = (x[6]+x[5]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[11]+x[10]) & 0xffffffff
b = (x[12]+x[15]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[2]+x[1]) & 0xffffffff
b = (x[7]+x[6]) & 0xffffffff
x[3] ^= (a << 13) | (a >> 19)
x[4] ^= (b << 13) | (b >> 19)
a = (x[8]+x[11]) & 0xffffffff
b = (x[13]+x[12]) & 0xffffffff
x[9] ^= (a << 13) | (a >> 19)
x[14] ^= (b << 13) | (b >> 19)
a = (x[3]+x[2]) & 0xffffffff
b = (x[4]+x[7]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[9]+x[8]) & 0xffffffff
b = (x[14]+x[13]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
# While we are handling the data, write it to the correct dest.
# The latter half is still part of salsa20
for i in xrange(16):
dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
def blockmix_salsa8(BY, Yi, r):
"""Blockmix; Used by SMix"""
start = (2 * r - 1) * 16
X = BY[start:start+16] # BlockMix - 1
tmp = [0]*16
for i in xrange(2 * r): # BlockMix - 2
#blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)
salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)
#array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4
for i in xrange(r): # BlockMix - 6
BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]
BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
def smix(B, Bi, r, N, V, X):
"""SMix; a specific case of ROMix based on Salsa20/8"""
X[0:(0)+(32 * r)] = B[Bi:(Bi)+(32 * r)]
for i in xrange(N): # ROMix - 2
V[i * (32 * r):(i * (32 * r))+(32 * r)] = X[0:(0)+(32 * r)]
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(N): # ROMix - 6
j = integerify(X, r) & (N - 1) # ROMix - 7
blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
B[Bi:(Bi)+(32 * r)] = X[0:(0)+(32 * r)]
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
__all__ = ['scrypt', 'scrypt_mcf', 'scrypt_mcf_check']
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
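A usage sketch for the scrypt() function above. The import path assumes the pylibscrypt package is installed; the password and salt bytes are placeholders:
from pylibscrypt.pypyscrypt_inline import scrypt

key = scrypt(b'correct horse battery staple',   # password (placeholder)
             b'NaClNaClNaClNaCl',                # salt (placeholder)
             N=2**14, r=8, p=1,                  # the documented defaults
             olen=32)
print(key.hex())                                 # 32-byte derived key, hex-encoded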
jvarho/pylibscrypt
|
pylibscrypt/pypyscrypt_inline.py
|
scrypt_mcf
|
python
|
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
|
Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pypyscrypt_inline.py#L210-L221
|
[
"def scrypt_mcf(scrypt, password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,\n prefix=SCRYPT_MCF_PREFIX_DEFAULT):\n \"\"\"Derives a Modular Crypt Format hash using the scrypt KDF given\n\n Expects the signature:\n scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64)\n\n If no salt is given, a random salt of 128+ bits is used. (Recommended.)\n \"\"\"\n if isinstance(password, unicode):\n password = password.encode('utf8')\n elif not isinstance(password, bytes):\n raise TypeError('password must be a unicode or byte string')\n if salt is not None and not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if salt is not None and not (1 <= len(salt) <= 16):\n raise ValueError('salt must be 1-16 bytes')\n if r > 255:\n raise ValueError('scrypt_mcf r out of range [1,255]')\n if p > 255:\n raise ValueError('scrypt_mcf p out of range [1,255]')\n if N > 2**31:\n raise ValueError('scrypt_mcf N out of range [2,2**31]')\n if b'\\0' in password:\n raise ValueError('scrypt_mcf password must not contain zero bytes')\n\n if prefix == SCRYPT_MCF_PREFIX_s1:\n if salt is None:\n salt = os.urandom(16)\n hash = scrypt(password, salt, N, r, p)\n return _scrypt_mcf_encode_s1(N, r, p, salt, hash)\n elif prefix == SCRYPT_MCF_PREFIX_7 or prefix == SCRYPT_MCF_PREFIX_ANY:\n if salt is None:\n salt = os.urandom(32)\n salt = _cb64enc(salt)\n hash = scrypt(password, salt, N, r, p, 32)\n return _scrypt_mcf_encode_7(N, r, p, salt, hash)\n else:\n raise ValueError(\"Unrecognized MCF format\")\n"
] |
# Automatically generated file, see inline.py
# Copyright (c) 2014 Richard Moore
# Copyright (c) 2014-2019 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Python implementation of Scrypt password-based key derivation function"""
# Scrypt definition:
# http://www.tarsnap.com/scrypt/scrypt.pdf
# It was originally written for a pure-Python Litecoin CPU miner:
# https://github.com/ricmoo/nightminer
# Imported to this project from:
# https://github.com/ricmoo/pyscrypt
# And owes thanks to:
# https://github.com/wg/scrypt
from hashlib import pbkdf2_hmac as _pbkdf2
import struct
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, xrange,
check_args)
def blockxor(source, s_start, dest, d_start, length):
for i in xrange(length):
dest[d_start + i] ^= source[s_start + i]
def integerify(B, r):
"""A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1"""
Bi = (2 * r - 1) * 16
return B[Bi]
def salsa20_8(B, x, src, s_start, dest, d_start):
"""Salsa20/8 http://en.wikipedia.org/wiki/Salsa20"""
# Merged blockxor for speed
for i in xrange(16):
x[i] = B[i] = B[i] ^ src[s_start + i]
# This is the actual Salsa 20/8: four identical double rounds
for i in xrange(4):
a = (x[0]+x[12]) & 0xffffffff
b = (x[5]+x[1]) & 0xffffffff
x[4] ^= (a << 7) | (a >> 25)
x[9] ^= (b << 7) | (b >> 25)
a = (x[10]+x[6]) & 0xffffffff
b = (x[15]+x[11]) & 0xffffffff
x[14] ^= (a << 7) | (a >> 25)
x[3] ^= (b << 7) | (b >> 25)
a = (x[4]+x[0]) & 0xffffffff
b = (x[9]+x[5]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[14]+x[10]) & 0xffffffff
b = (x[3]+x[15]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[8]+x[4]) & 0xffffffff
b = (x[13]+x[9]) & 0xffffffff
x[12] ^= (a << 13) | (a >> 19)
x[1] ^= (b << 13) | (b >> 19)
a = (x[2]+x[14]) & 0xffffffff
b = (x[7]+x[3]) & 0xffffffff
x[6] ^= (a << 13) | (a >> 19)
x[11] ^= (b << 13) | (b >> 19)
a = (x[12]+x[8]) & 0xffffffff
b = (x[1]+x[13]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[6]+x[2]) & 0xffffffff
b = (x[11]+x[7]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
a = (x[0]+x[3]) & 0xffffffff
b = (x[5]+x[4]) & 0xffffffff
x[1] ^= (a << 7) | (a >> 25)
x[6] ^= (b << 7) | (b >> 25)
a = (x[10]+x[9]) & 0xffffffff
b = (x[15]+x[14]) & 0xffffffff
x[11] ^= (a << 7) | (a >> 25)
x[12] ^= (b << 7) | (b >> 25)
a = (x[1]+x[0]) & 0xffffffff
b = (x[6]+x[5]) & 0xffffffff
x[2] ^= (a << 9) | (a >> 23)
x[7] ^= (b << 9) | (b >> 23)
a = (x[11]+x[10]) & 0xffffffff
b = (x[12]+x[15]) & 0xffffffff
x[8] ^= (a << 9) | (a >> 23)
x[13] ^= (b << 9) | (b >> 23)
a = (x[2]+x[1]) & 0xffffffff
b = (x[7]+x[6]) & 0xffffffff
x[3] ^= (a << 13) | (a >> 19)
x[4] ^= (b << 13) | (b >> 19)
a = (x[8]+x[11]) & 0xffffffff
b = (x[13]+x[12]) & 0xffffffff
x[9] ^= (a << 13) | (a >> 19)
x[14] ^= (b << 13) | (b >> 19)
a = (x[3]+x[2]) & 0xffffffff
b = (x[4]+x[7]) & 0xffffffff
x[0] ^= (a << 18) | (a >> 14)
x[5] ^= (b << 18) | (b >> 14)
a = (x[9]+x[8]) & 0xffffffff
b = (x[14]+x[13]) & 0xffffffff
x[10] ^= (a << 18) | (a >> 14)
x[15] ^= (b << 18) | (b >> 14)
# While we are handling the data, write it to the correct dest.
# The latter half is still part of salsa20
for i in xrange(16):
dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
def blockmix_salsa8(BY, Yi, r):
"""Blockmix; Used by SMix"""
start = (2 * r - 1) * 16
X = BY[start:start+16] # BlockMix - 1
tmp = [0]*16
for i in xrange(2 * r): # BlockMix - 2
#blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)
salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)
#array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4
for i in xrange(r): # BlockMix - 6
BY[i * 16:(i * 16)+(16)] = BY[Yi + (i * 2) * 16:(Yi + (i * 2) * 16)+(16)]
BY[(i + r) * 16:((i + r) * 16)+(16)] = BY[Yi + (i*2 + 1) * 16:(Yi + (i*2 + 1) * 16)+(16)]
def smix(B, Bi, r, N, V, X):
"""SMix; a specific case of ROMix based on Salsa20/8"""
X[0:(0)+(32 * r)] = B[Bi:(Bi)+(32 * r)]
for i in xrange(N): # ROMix - 2
V[i * (32 * r):(i * (32 * r))+(32 * r)] = X[0:(0)+(32 * r)]
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(N): # ROMix - 6
j = integerify(X, r) & (N - 1) # ROMix - 7
blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
B[Bi:(Bi)+(32 * r)] = X[0:(0)+(32 * r)]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
# Everything is lists of 32-bit uints for all but pbkdf2
try:
B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
B = list(struct.unpack('<%dI' % (len(B) // 4), B))
XY = [0] * (64 * r)
V = [0] * (32 * r * N)
except (MemoryError, OverflowError):
raise ValueError("scrypt parameters don't fit in memory")
for i in xrange(p):
smix(B, i * 32 * r, r, N, V, XY)
B = struct.pack('<%dI' % len(B), *B)
return _pbkdf2('sha256', password, B, 1, olen)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
__all__ = ['scrypt', 'scrypt_mcf', 'scrypt_mcf_check']
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
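The pure-Python scrypt above exists as a fallback implementation; for simply trying out the parameter trade-offs its docstring describes, CPython's standard library exposes the same KDF as hashlib.scrypt (Python 3.6+ built against OpenSSL). A minimal usage sketch with the module defaults:

# Stdlib scrypt sketch (assumes CPython 3.6+ with OpenSSL support);
# n, r, p match the SCRYPT_N, SCRYPT_r, SCRYPT_p defaults above.
import hashlib, os

salt = os.urandom(16)
key = hashlib.scrypt(b'correct horse', salt=salt, n=2**14, r=8, p=1, dklen=64)
print(key.hex())  # memory ~128*r*n bytes (16 MiB here); time scales with n*p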
jvarho/pylibscrypt
|
pylibscrypt/libsodium_load.py
|
get_libsodium
|
python
|
def get_libsodium():
'''Locate the libsodium C library'''
__SONAMES = (13, 10, 5, 4)
# Import libsodium from system
sys_sodium = ctypes.util.find_library('sodium')
if sys_sodium is None:
sys_sodium = ctypes.util.find_library('libsodium')
if sys_sodium:
try:
return ctypes.CDLL(sys_sodium)
except OSError:
pass
# Import from local path
if sys.platform.startswith('win'):
try:
return ctypes.cdll.LoadLibrary('libsodium')
except OSError:
pass
for soname_ver in __SONAMES:
try:
return ctypes.cdll.LoadLibrary(
'libsodium-{0}'.format(soname_ver)
)
except OSError:
pass
elif sys.platform.startswith('darwin'):
try:
return ctypes.cdll.LoadLibrary('libsodium.dylib')
except OSError:
try:
libidx = __file__.find('lib')
if libidx > 0:
libpath = __file__[0:libidx+3] + '/libsodium.dylib'
return ctypes.cdll.LoadLibrary(libpath)
except OSError:
pass
else:
try:
return ctypes.cdll.LoadLibrary('libsodium.so')
except OSError:
pass
for soname_ver in __SONAMES:
try:
return ctypes.cdll.LoadLibrary(
'libsodium.so.{0}'.format(soname_ver)
)
except OSError:
pass
|
Locate the libsodium C library
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/libsodium_load.py#L19-L71
| null |
# Copyright (c) 2015, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import ctypes.util
import sys
|
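get_libsodium above tries ctypes.util.find_library first, then platform-specific filenames and versioned sonames, returning None if nothing loads. A condensed sketch of the same fallback-chain pattern (the candidate names below are illustrative, mirroring __SONAMES in the row above):

# Condensed library-loader sketch; candidate names are illustrative.
import ctypes, ctypes.util

def load_first(names):
    for name in names:
        if not name:          # find_library may return None
            continue
        try:
            return ctypes.CDLL(name)
        except OSError:
            pass
    return None

lib = load_first([ctypes.util.find_library('sodium'),
                  'libsodium.so', 'libsodium.dylib', 'libsodium.so.13'])
print('loaded' if lib is not None else 'libsodium not found')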
jvarho/pylibscrypt
|
pylibscrypt/pylibsodium.py
|
scrypt
|
python
|
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
check_args(password, salt, N, r, p, olen)
if _scrypt_ll:
out = ctypes.create_string_buffer(olen)
if _scrypt_ll(password, len(password), salt, len(salt),
N, r, p, out, olen):
raise ValueError
return out.raw
if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N*p <= 512):
return scr_mod.scrypt(password, salt, N, r, p, olen)
s = next(i for i in range(1, 64) if 2**i == N)
t = next(i for i in range(0, 30) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
if s > 53 or t + s > 58:
raise ValueError
out = ctypes.create_string_buffer(olen)
if _scrypt(out, olen, password, len(password), salt, o, m) != 0:
raise ValueError
return out.raw
|
Returns a key derived using the scrypt key-derivation function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pylibsodium.py#L98-L138
|
[
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n\n check_args(password, salt, N, r, p, olen)\n\n # Everything is lists of 32-bit uints for all but pbkdf2\n try:\n B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)\n B = list(struct.unpack('<%dI' % (len(B) // 4), B))\n XY = [0] * (64 * r)\n V = [0] * (32 * r * N)\n except (MemoryError, OverflowError):\n raise ValueError(\"scrypt parameters don't fit in memory\")\n\n for i in xrange(p):\n smix(B, i * 32 * r, r, N, V, XY)\n\n B = struct.pack('<%dI' % len(B), *B)\n return _pbkdf2('sha256', password, B, 1, olen)\n",
"def check_args(password, salt, N, r, p, olen=64):\n if not isinstance(password, bytes):\n raise TypeError('password must be a byte string')\n if not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if not isinstance(N, numbers.Integral):\n raise TypeError('N must be an integer')\n if not isinstance(r, numbers.Integral):\n raise TypeError('r must be an integer')\n if not isinstance(p, numbers.Integral):\n raise TypeError('p must be an integer')\n if not isinstance(olen, numbers.Integral):\n raise TypeError('length must be an integer')\n if N > 2**63:\n raise ValueError('N cannot be larger than 2**63')\n if (N & (N - 1)) or N < 2:\n raise ValueError('N must be a power of two larger than 1')\n if r <= 0:\n raise ValueError('r must be positive')\n if p <= 0:\n raise ValueError('p must be positive')\n if r * p >= 2**30:\n raise ValueError('r * p must be less than 2 ** 30')\n if olen <= 0:\n raise ValueError('length must be positive')\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into system libsodium"""
import ctypes
from ctypes import c_char_p, c_size_t, c_uint64, c_uint32, c_void_p
import platform
from warnings import catch_warnings, filterwarnings
from . import mcf as mcf_mod
from . import libsodium_load
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, check_args, unicode)
from . import pypyscrypt_inline as scr_mod
_lib = libsodium_load.get_libsodium()
if _lib is None:
raise ImportError('Unable to load libsodium')
try:
_scrypt_ll = _lib.crypto_pwhash_scryptsalsa208sha256_ll
_scrypt_ll.argtypes = [
c_void_p, # passwd
c_size_t, # passwdlen
c_void_p, # salt
c_size_t, # saltlen
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_void_p, # buf
c_size_t, # buflen
]
except AttributeError:
_scrypt_ll = None
try:
_scrypt = _lib.crypto_pwhash_scryptsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptsalsa208sha256_saltbytes()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
try:
_scrypt = _lib.crypto_pwhash_scryptxsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptxsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptxsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptxsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptxsalsa208sha256_saltbytes
_scrypt_salt = _scrypt_salt()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
if not _scrypt_ll:
raise ImportError('Incompatible libsodium')
_scrypt.argtypes = [
c_void_p, # out
c_uint64, # outlen
c_void_p, # passwd
c_uint64, # passwdlen
c_void_p, # salt
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str.argtypes = [
c_void_p, # out (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str_chk.argtypes = [
c_char_p, # str (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
]
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if N < 2 or (N & (N - 1)):
raise ValueError('scrypt N must be a power of 2 greater than 1')
if p > 255 or p < 1:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
if (salt is not None or r != 8 or (p & (p - 1)) or (N*p <= 512) or
prefix not in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_ANY) or
_scrypt_ll):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
s = next(i for i in range(1, 32) if 2**i == N)
t = next(i for i in range(0, 8) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
mcf = ctypes.create_string_buffer(102)
if _scrypt_str(mcf, password, len(password), o, m) != 0:
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_ANY):
return mcf.raw.strip(b'\0')
_N, _r, _p, salt, hash, olen = mcf_mod._scrypt_mcf_decode_7(mcf.raw[:-1])
assert _N == N and _r == r and _p == p, (_N, _r, _p, N, r, p, o, m)
return mcf_mod._scrypt_mcf_encode_s1(N, r, p, salt, hash)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll:
return _scrypt_str_chk(mcf, password, len(password)) == 0
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
if __name__ == "__main__":
import sys
from . import tests
try:
from . import pylibscrypt
scr_mod = pylibscrypt
except ImportError:
pass
tests.run_scrypt_suite(sys.modules[__name__])
|
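The wrapper above cannot hand N, r, p to libsodium's high-level crypto_pwhash_scryptsalsa208sha256 directly; with r fixed at 8 it encodes them as an (opslimit, memlimit) pair via m = 2**(10+s) and o = 2**(5+t+s), where s = log2(N) and t = log2(p). The exponents are chosen so that libsodium's parameter picker recovers the same values (the assert in scrypt_mcf in the enclosing scope above depends on this). Worked through for the defaults:

# Worked (N, p) -> (opslimit, memlimit) mapping, r fixed at 8.
N, p = 2**14, 1              # module defaults
s = N.bit_length() - 1       # 14, since N == 2**s
t = p.bit_length() - 1       # 0
memlimit = 2**(10 + s)       # 2**24 = 16 MiB, i.e. 128 * 8 * N
opslimit = 2**(5 + t + s)    # 2**19 = 524288
print(memlimit, opslimit)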
jvarho/pylibscrypt
|
pylibscrypt/pylibsodium.py
|
scrypt_mcf
|
python
|
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if N < 2 or (N & (N - 1)):
raise ValueError('scrypt N must be a power of 2 greater than 1')
if p > 255 or p < 1:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
if (salt is not None or r != 8 or (p & (p - 1)) or (N*p <= 512) or
prefix not in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_ANY) or
_scrypt_ll):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
s = next(i for i in range(1, 32) if 2**i == N)
t = next(i for i in range(0, 8) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
mcf = ctypes.create_string_buffer(102)
if _scrypt_str(mcf, password, len(password), o, m) != 0:
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_ANY):
return mcf.raw.strip(b'\0')
_N, _r, _p, salt, hash, olen = mcf_mod._scrypt_mcf_decode_7(mcf.raw[:-1])
assert _N == N and _r == r and _p == p, (_N, _r, _p, N, r, p, o, m)
return mcf_mod._scrypt_mcf_encode_s1(N, r, p, salt, hash)
|
Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pylibsodium.py#L141-L182
|
[
"def scrypt_mcf(scrypt, password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,\n prefix=SCRYPT_MCF_PREFIX_DEFAULT):\n \"\"\"Derives a Modular Crypt Format hash using the scrypt KDF given\n\n Expects the signature:\n scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64)\n\n If no salt is given, a random salt of 128+ bits is used. (Recommended.)\n \"\"\"\n if isinstance(password, unicode):\n password = password.encode('utf8')\n elif not isinstance(password, bytes):\n raise TypeError('password must be a unicode or byte string')\n if salt is not None and not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if salt is not None and not (1 <= len(salt) <= 16):\n raise ValueError('salt must be 1-16 bytes')\n if r > 255:\n raise ValueError('scrypt_mcf r out of range [1,255]')\n if p > 255:\n raise ValueError('scrypt_mcf p out of range [1,255]')\n if N > 2**31:\n raise ValueError('scrypt_mcf N out of range [2,2**31]')\n if b'\\0' in password:\n raise ValueError('scrypt_mcf password must not contain zero bytes')\n\n if prefix == SCRYPT_MCF_PREFIX_s1:\n if salt is None:\n salt = os.urandom(16)\n hash = scrypt(password, salt, N, r, p)\n return _scrypt_mcf_encode_s1(N, r, p, salt, hash)\n elif prefix == SCRYPT_MCF_PREFIX_7 or prefix == SCRYPT_MCF_PREFIX_ANY:\n if salt is None:\n salt = os.urandom(32)\n salt = _cb64enc(salt)\n hash = scrypt(password, salt, N, r, p, 32)\n return _scrypt_mcf_encode_7(N, r, p, salt, hash)\n else:\n raise ValueError(\"Unrecognized MCF format\")\n",
"def _scrypt_mcf_encode_s1(N, r, p, salt, hash):\n h64 = base64.b64encode(hash)\n s64 = base64.b64encode(salt)\n\n t = 1\n while 2**t < N:\n t += 1\n params = p + (r << 8) + (t << 16)\n\n return (\n b'$s1' +\n ('$%06x' % params).encode() +\n b'$' + s64 +\n b'$' + h64\n )\n",
"def _scrypt_mcf_decode_7(mcf):\n s = mcf.split(b'$')\n if not (mcf.startswith(b'$7$') and len(s) == 4):\n return None\n\n s64 = bytearray(s[2])\n h64 = bytearray(s[3])\n try:\n N = 2 ** _icb64[s64[0]]\n r = (_icb64[s64[1]] + (_icb64[s64[2]] << 6) + (_icb64[s64[3]] << 12) +\n (_icb64[s64[4]] << 18) + (_icb64[s64[5]] << 24))\n p = (_icb64[s64[6]] + (_icb64[s64[7]] << 6) + (_icb64[s64[8]] << 12) +\n (_icb64[s64[9]] << 18) + (_icb64[s64[10]] << 24))\n salt = bytes(s64[11:])\n hash = bytes(_cb64dec(h64))\n except (IndexError, TypeError):\n raise ValueError('Unrecognized MCF format')\n\n return N, r, p, salt, hash, len(hash)\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into system libsodium"""
import ctypes
from ctypes import c_char_p, c_size_t, c_uint64, c_uint32, c_void_p
import platform
from warnings import catch_warnings, filterwarnings
from . import mcf as mcf_mod
from . import libsodium_load
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, check_args, unicode)
from . import pypyscrypt_inline as scr_mod
_lib = libsodium_load.get_libsodium()
if _lib is None:
raise ImportError('Unable to load libsodium')
try:
_scrypt_ll = _lib.crypto_pwhash_scryptsalsa208sha256_ll
_scrypt_ll.argtypes = [
c_void_p, # passwd
c_size_t, # passwdlen
c_void_p, # salt
c_size_t, # saltlen
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_void_p, # buf
c_size_t, # buflen
]
except AttributeError:
_scrypt_ll = None
try:
_scrypt = _lib.crypto_pwhash_scryptsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptsalsa208sha256_saltbytes()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
try:
_scrypt = _lib.crypto_pwhash_scryptxsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptxsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptxsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptxsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptxsalsa208sha256_saltbytes
_scrypt_salt = _scrypt_salt()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
if not _scrypt_ll:
raise ImportError('Incompatible libsodium')
_scrypt.argtypes = [
c_void_p, # out
c_uint64, # outlen
c_void_p, # passwd
c_uint64, # passwdlen
c_void_p, # salt
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str.argtypes = [
c_void_p, # out (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str_chk.argtypes = [
c_char_p, # str (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
if _scrypt_ll:
out = ctypes.create_string_buffer(olen)
if _scrypt_ll(password, len(password), salt, len(salt),
N, r, p, out, olen):
raise ValueError
return out.raw
if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N*p <= 512):
return scr_mod.scrypt(password, salt, N, r, p, olen)
s = next(i for i in range(1, 64) if 2**i == N)
t = next(i for i in range(0, 30) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
if s > 53 or t + s > 58:
raise ValueError
out = ctypes.create_string_buffer(olen)
if _scrypt(out, olen, password, len(password), salt, o, m) != 0:
raise ValueError
return out.raw
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll:
return _scrypt_str_chk(mcf, password, len(password)) == 0
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
if __name__ == "__main__":
import sys
from . import tests
try:
from . import pylibscrypt
scr_mod = pylibscrypt
except ImportError:
pass
tests.run_scrypt_suite(sys.modules[__name__])
|
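When scrypt_mcf above is asked for the $s1$ prefix, it re-encodes libsodium's $7$ output through mcf_mod._scrypt_mcf_encode_s1 (implementation shown in the mcf.py rows below), which packs p, r and log2(N) into one integer and hex-encodes six digits. Worked through for the defaults:

# Worked $s1$ parameter packing, as in _scrypt_mcf_encode_s1.
N, r, p = 2**14, 8, 1
t = N.bit_length() - 1             # 14 == 0x0e
params = p + (r << 8) + (t << 16)  # 0x0e0801 == 919553
header = b'$s1' + ('$%06x' % params).encode()
print(header)                      # b'$s1$0e0801'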
jvarho/pylibscrypt
|
pylibscrypt/pylibsodium.py
|
scrypt_mcf_check
|
python
|
def scrypt_mcf_check(mcf, password):
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if mcf_mod._scrypt_mcf_7_is_standard(mcf) and not _scrypt_ll:
return _scrypt_str_chk(mcf, password, len(password)) == 0
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
|
Returns True if the password matches the given MCF hash
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pylibsodium.py#L185-L195
|
[
"def scrypt_mcf_check(scrypt, mcf, password):\n \"\"\"Returns True if the password matches the given MCF hash\n\n Supports both the libscrypt $s1$ format and the $7$ format.\n \"\"\"\n if not isinstance(mcf, bytes):\n raise TypeError('MCF must be a byte string')\n if isinstance(password, unicode):\n password = password.encode('utf8')\n elif not isinstance(password, bytes):\n raise TypeError('password must be a unicode or byte string')\n\n N, r, p, salt, hash, hlen = _scrypt_mcf_decode(mcf)\n h = scrypt(password, salt, N=N, r=r, p=p, olen=hlen)\n cmp = 0\n for i, j in zip(bytearray(h), bytearray(hash)):\n cmp |= i ^ j\n return cmp == 0\n",
"def _scrypt_mcf_7_is_standard(mcf):\n params = _scrypt_mcf_decode_7(mcf)\n if params is None:\n return False\n N, r, p, salt, hash, hlen = params\n return len(salt) == 43 and hlen == 32\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into system libsodium"""
import ctypes
from ctypes import c_char_p, c_size_t, c_uint64, c_uint32, c_void_p
import platform
from warnings import catch_warnings, filterwarnings
from . import mcf as mcf_mod
from . import libsodium_load
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, check_args, unicode)
from . import pypyscrypt_inline as scr_mod
_lib = libsodium_load.get_libsodium()
if _lib is None:
raise ImportError('Unable to load libsodium')
try:
_scrypt_ll = _lib.crypto_pwhash_scryptsalsa208sha256_ll
_scrypt_ll.argtypes = [
c_void_p, # passwd
c_size_t, # passwdlen
c_void_p, # salt
c_size_t, # saltlen
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_void_p, # buf
c_size_t, # buflen
]
except AttributeError:
_scrypt_ll = None
try:
_scrypt = _lib.crypto_pwhash_scryptsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptsalsa208sha256_saltbytes()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
try:
_scrypt = _lib.crypto_pwhash_scryptxsalsa208sha256
_scrypt_str = _lib.crypto_pwhash_scryptxsalsa208sha256_str
_scrypt_str_chk = _lib.crypto_pwhash_scryptxsalsa208sha256_str_verify
_scrypt_str_bytes = _lib.crypto_pwhash_scryptxsalsa208sha256_strbytes()
_scrypt_salt = _lib.crypto_pwhash_scryptxsalsa208sha256_saltbytes
_scrypt_salt = _scrypt_salt()
if _scrypt_str_bytes != 102 and not _scrypt_ll:
raise ImportError('Incompatible libsodium')
except AttributeError:
if not _scrypt_ll:
raise ImportError('Incompatible libsodium')
_scrypt.argtypes = [
c_void_p, # out
c_uint64, # outlen
c_void_p, # passwd
c_uint64, # passwdlen
c_void_p, # salt
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str.argtypes = [
c_void_p, # out (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
c_uint64, # opslimit
c_size_t, # memlimit
]
_scrypt_str_chk.argtypes = [
c_char_p, # str (102 bytes)
c_void_p, # passwd
c_uint64, # passwdlen
]
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
    Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
if _scrypt_ll:
out = ctypes.create_string_buffer(olen)
if _scrypt_ll(password, len(password), salt, len(salt),
N, r, p, out, olen):
raise ValueError
return out.raw
if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N*p <= 512):
return scr_mod.scrypt(password, salt, N, r, p, olen)
s = next(i for i in range(1, 64) if 2**i == N)
t = next(i for i in range(0, 30) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
if s > 53 or t + s > 58:
raise ValueError
out = ctypes.create_string_buffer(olen)
if _scrypt(out, olen, password, len(password), salt, o, m) != 0:
raise ValueError
return out.raw
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if N < 2 or (N & (N - 1)):
raise ValueError('scrypt N must be a power of 2 greater than 1')
if p > 255 or p < 1:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
if (salt is not None or r != 8 or (p & (p - 1)) or (N*p <= 512) or
prefix not in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_ANY) or
_scrypt_ll):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
s = next(i for i in range(1, 32) if 2**i == N)
t = next(i for i in range(0, 8) if 2**i == p)
m = 2**(10 + s)
o = 2**(5 + t + s)
mcf = ctypes.create_string_buffer(102)
if _scrypt_str(mcf, password, len(password), o, m) != 0:
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if prefix in (SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_ANY):
return mcf.raw.strip(b'\0')
_N, _r, _p, salt, hash, olen = mcf_mod._scrypt_mcf_decode_7(mcf.raw[:-1])
assert _N == N and _r == r and _p == p, (_N, _r, _p, N, r, p, o, m)
return mcf_mod._scrypt_mcf_encode_s1(N, r, p, salt, hash)
if __name__ == "__main__":
import sys
from . import tests
try:
from . import pylibscrypt
scr_mod = pylibscrypt
except ImportError:
pass
tests.run_scrypt_suite(sys.modules[__name__])
|
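When the libsodium fast path does not apply, scrypt_mcf_check above delegates to mcf_mod.scrypt_mcf_check, which compares the recomputed hash with an XOR-accumulate loop instead of == so comparison time does not depend on how many leading bytes match. The stdlib equivalent is hmac.compare_digest; a small sketch of both (note the library loop zips the inputs, so it relies on hlen from the MCF keeping the lengths equal in practice):

# Constant-time comparison: XOR-accumulate loop vs stdlib equivalent.
import hmac

def ct_equal(a, b):
    if len(a) != len(b):
        return False
    acc = 0
    for i, j in zip(bytearray(a), bytearray(b)):
        acc |= i ^ j           # stays 0 only if every byte pair matches
    return acc == 0

h1, h2 = b'\x01\x02\x03', b'\x01\x02\x03'
print(ct_equal(h1, h2), hmac.compare_digest(h1, h2))  # True True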
jvarho/pylibscrypt
|
pylibscrypt/mcf.py
|
scrypt_mcf
|
python
|
def scrypt_mcf(scrypt, password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if salt is not None and not isinstance(salt, bytes):
raise TypeError('salt must be a byte string')
if salt is not None and not (1 <= len(salt) <= 16):
raise ValueError('salt must be 1-16 bytes')
if r > 255:
raise ValueError('scrypt_mcf r out of range [1,255]')
if p > 255:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
if b'\0' in password:
raise ValueError('scrypt_mcf password must not contain zero bytes')
if prefix == SCRYPT_MCF_PREFIX_s1:
if salt is None:
salt = os.urandom(16)
hash = scrypt(password, salt, N, r, p)
return _scrypt_mcf_encode_s1(N, r, p, salt, hash)
elif prefix == SCRYPT_MCF_PREFIX_7 or prefix == SCRYPT_MCF_PREFIX_ANY:
if salt is None:
salt = os.urandom(32)
salt = _cb64enc(salt)
hash = scrypt(password, salt, N, r, p, 32)
return _scrypt_mcf_encode_7(N, r, p, salt, hash)
else:
raise ValueError("Unrecognized MCF format")
|
Derives a Modular Crypt Format hash using the scrypt KDF given
Expects the signature:
scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64)
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/mcf.py#L199-L237
|
[
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n\n check_args(password, salt, N, r, p, olen)\n\n # Everything is lists of 32-bit uints for all but pbkdf2\n try:\n B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)\n B = list(struct.unpack('<%dI' % (len(B) // 4), B))\n XY = [0] * (64 * r)\n V = [0] * (32 * r * N)\n except (MemoryError, OverflowError):\n raise ValueError(\"scrypt parameters don't fit in memory\")\n\n for i in xrange(p):\n smix(B, i * 32 * r, r, N, V, XY)\n\n B = struct.pack('<%dI' % len(B), *B)\n return _pbkdf2('sha256', password, B, 1, olen)\n",
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n\n check_args(password, salt, N, r, p, olen)\n\n # Everything is lists of 32-bit uints for all but pbkdf2\n try:\n B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)\n B = list(struct.unpack('<%dI' % (len(B) // 4), B))\n XY = [0] * (64 * r)\n V = [0] * (32 * r * N)\n except (MemoryError, OverflowError):\n raise ValueError(\"scrypt parameters don't fit in memory\")\n\n for i in xrange(p):\n smix(B, i * 32 * r, r, N, V, XY)\n\n B = struct.pack('<%dI' % len(B), *B)\n return _pbkdf2('sha256', password, B, 1, olen)\n",
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n check_args(password, salt, N, r, p, olen)\n\n # Set the memory required based on parameter values\n m = 128 * r * (N + p + 2)\n\n try:\n return _scrypt(\n password=password, salt=salt, n=N, r=r, p=p, maxmem=m, dklen=olen)\n except:\n raise ValueError\n",
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n check_args(password, salt, N, r, p, olen)\n\n try:\n return _scrypt(password=password, salt=salt, N=N, r=r, p=p, buflen=olen)\n except:\n raise ValueError\n",
"def _scrypt_mcf_encode_s1(N, r, p, salt, hash):\n h64 = base64.b64encode(hash)\n s64 = base64.b64encode(salt)\n\n t = 1\n while 2**t < N:\n t += 1\n params = p + (r << 8) + (t << 16)\n\n return (\n b'$s1' +\n ('$%06x' % params).encode() +\n b'$' + s64 +\n b'$' + h64\n )\n",
"def _cb64enc(arr):\n arr = bytearray(arr)\n out = bytearray()\n val = bits = pos = 0\n for b in arr:\n val += b << bits\n bits += 8\n while bits >= 0:\n out.append(_cb64a[val & 0x3f])\n bits -= 6\n val = val >> 6\n return bytes(out)\n",
"def _scrypt_mcf_encode_7(N, r, p, salt, hash):\n t = 1\n while 2**t < N:\n t += 1\n return (\n b'$7$' +\n # N\n _cb64[t::64] +\n # r\n _cb64[r & 0x3f::64] + _cb64[(r >> 6) & 0x3f::64] +\n _cb64[(r >> 12) & 0x3f::64] + _cb64[(r >> 18) & 0x3f::64] +\n _cb64[(r >> 24) & 0x3f::64] +\n # p\n _cb64[p & 0x3f::64] + _cb64[(p >> 6) & 0x3f::64] +\n _cb64[(p >> 12) & 0x3f::64] + _cb64[(p >> 18) & 0x3f::64] +\n _cb64[(p >> 24) & 0x3f::64] +\n # rest\n salt +\n b'$' + _cb64enc(hash)\n )\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Modular Crypt Format support for scrypt
Compatible with libscrypt; scrypt_mcf_check also supports the $7$ format.
libscrypt format:
$s1$NNrrpp$salt$hash
NN - hex encoded N log2 (two hex digits)
rr - hex encoded r in 1-255
pp - hex encoded p in 1-255
salt - base64 encoded salt 1-16 bytes decoded
hash - base64 encoded 64-byte scrypt hash
$7$ format:
$7$Nrrrrrpppppsalt$hash
N - crypt base64 N log2
rrrrr - crypt base64 r (little-endian 30 bits)
ppppp - crypt base64 p (little-endian 30 bits)
salt - raw salt (0-43 bytes that should be limited to crypt base64)
hash - crypt base64 encoded 32-byte scrypt hash (43 bytes)
(crypt base64 is base64 with the alphabet: ./0-9A-Za-z)
When reading, we are more lax, allowing salts and hashes to be longer and
incorrectly encoded, since the worst that can happen is that the password does
not verify.
"""
import base64
import binascii
import os
import struct
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, unicode)
def _scrypt_mcf_encode_s1(N, r, p, salt, hash):
h64 = base64.b64encode(hash)
s64 = base64.b64encode(salt)
t = 1
while 2**t < N:
t += 1
params = p + (r << 8) + (t << 16)
return (
b'$s1' +
('$%06x' % params).encode() +
b'$' + s64 +
b'$' + h64
)
def _b64decode(b64):
for b in (b64, b64 + b'=', b64 + b'=='):
try:
return base64.b64decode(b)
except (TypeError, binascii.Error):
pass
raise ValueError('Incorrect base64 in MCF')
def _scrypt_mcf_decode_s1(mcf):
s = mcf.split(b'$')
if not (mcf.startswith(b'$s1$') and len(s) == 5):
return None
params, s64, h64 = s[2:]
params = base64.b16decode(params, True)
salt = _b64decode(s64)
hash = _b64decode(h64)
if len(params) != 3:
raise ValueError('Unrecognized MCF parameters')
t, r, p = struct.unpack('3B', params)
N = 2 ** t
return N, r, p, salt, hash, len(hash)
# Crypt base 64
_cb64 = b'./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_cb64a = bytearray(_cb64)
_icb64 = (
[None] * 46 +
[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, None, None, None, None, None,
None, None, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, None, None, None,
None, None, None, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63
] +
[None] * 133
)
def _cb64enc(arr):
arr = bytearray(arr)
out = bytearray()
val = bits = pos = 0
for b in arr:
val += b << bits
bits += 8
while bits >= 0:
out.append(_cb64a[val & 0x3f])
bits -= 6
val = val >> 6
return bytes(out)
def _scrypt_mcf_encode_7(N, r, p, salt, hash):
t = 1
while 2**t < N:
t += 1
return (
b'$7$' +
# N
_cb64[t::64] +
# r
_cb64[r & 0x3f::64] + _cb64[(r >> 6) & 0x3f::64] +
_cb64[(r >> 12) & 0x3f::64] + _cb64[(r >> 18) & 0x3f::64] +
_cb64[(r >> 24) & 0x3f::64] +
# p
_cb64[p & 0x3f::64] + _cb64[(p >> 6) & 0x3f::64] +
_cb64[(p >> 12) & 0x3f::64] + _cb64[(p >> 18) & 0x3f::64] +
_cb64[(p >> 24) & 0x3f::64] +
# rest
salt +
b'$' + _cb64enc(hash)
)
def _cb64dec(arr):
out = bytearray()
val = bits = pos = 0
for b in arr:
val += _icb64[b] << bits
bits += 6
if bits >= 8:
out.append(val & 0xff)
bits -= 8
val >>= 8
return out
def _scrypt_mcf_decode_7(mcf):
s = mcf.split(b'$')
if not (mcf.startswith(b'$7$') and len(s) == 4):
return None
s64 = bytearray(s[2])
h64 = bytearray(s[3])
try:
N = 2 ** _icb64[s64[0]]
r = (_icb64[s64[1]] + (_icb64[s64[2]] << 6) + (_icb64[s64[3]] << 12) +
(_icb64[s64[4]] << 18) + (_icb64[s64[5]] << 24))
p = (_icb64[s64[6]] + (_icb64[s64[7]] << 6) + (_icb64[s64[8]] << 12) +
(_icb64[s64[9]] << 18) + (_icb64[s64[10]] << 24))
salt = bytes(s64[11:])
hash = bytes(_cb64dec(h64))
except (IndexError, TypeError):
raise ValueError('Unrecognized MCF format')
return N, r, p, salt, hash, len(hash)
def _scrypt_mcf_7_is_standard(mcf):
params = _scrypt_mcf_decode_7(mcf)
if params is None:
return False
N, r, p, salt, hash, hlen = params
return len(salt) == 43 and hlen == 32
def _scrypt_mcf_decode(mcf):
params = _scrypt_mcf_decode_s1(mcf)
if params is None:
params = _scrypt_mcf_decode_7(mcf)
if params is None:
raise ValueError('Unrecognized MCF hash')
return params
def scrypt_mcf_check(scrypt, mcf, password):
"""Returns True if the password matches the given MCF hash
Supports both the libscrypt $s1$ format and the $7$ format.
"""
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
N, r, p, salt, hash, hlen = _scrypt_mcf_decode(mcf)
h = scrypt(password, salt, N=N, r=r, p=p, olen=hlen)
cmp = 0
for i, j in zip(bytearray(h), bytearray(hash)):
cmp |= i ^ j
return cmp == 0
|
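Decoding reverses the packing above: _scrypt_mcf_decode_s1 hex-decodes the six parameter digits into three bytes and unpacks them as (t, r, p) with N = 2**t. A worked round trip on the header produced earlier:

# Worked $s1$ decode, mirroring _scrypt_mcf_decode_s1 above.
import base64, struct

raw = base64.b16decode(b'0e0801', True)  # casefold hex -> b'\x0e\x08\x01'
t, r, p = struct.unpack('3B', raw)       # t is the packed integer's high byte
N = 2 ** t
print(N, r, p)                           # 16384 8 1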
jvarho/pylibscrypt
|
pylibscrypt/mcf.py
|
scrypt_mcf_check
|
python
|
def scrypt_mcf_check(scrypt, mcf, password):
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
N, r, p, salt, hash, hlen = _scrypt_mcf_decode(mcf)
h = scrypt(password, salt, N=N, r=r, p=p, olen=hlen)
cmp = 0
for i, j in zip(bytearray(h), bytearray(hash)):
cmp |= i ^ j
return cmp == 0
|
Returns True if the password matches the given MCF hash
Supports both the libscrypt $s1$ format and the $7$ format.
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/mcf.py#L240-L257
|
[
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n\n check_args(password, salt, N, r, p, olen)\n\n # Everything is lists of 32-bit uints for all but pbkdf2\n try:\n B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)\n B = list(struct.unpack('<%dI' % (len(B) // 4), B))\n XY = [0] * (64 * r)\n V = [0] * (32 * r * N)\n except (MemoryError, OverflowError):\n raise ValueError(\"scrypt parameters don't fit in memory\")\n\n for i in xrange(p):\n smix(B, i * 32 * r, r, N, V, XY)\n\n B = struct.pack('<%dI' % len(B), *B)\n return _pbkdf2('sha256', password, B, 1, olen)\n",
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n\n check_args(password, salt, N, r, p, olen)\n\n # Everything is lists of 32-bit uints for all but pbkdf2\n try:\n B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)\n B = list(struct.unpack('<%dI' % (len(B) // 4), B))\n XY = [0] * (64 * r)\n V = [0] * (32 * r * N)\n except (MemoryError, OverflowError):\n raise ValueError(\"scrypt parameters don't fit in memory\")\n\n for i in xrange(p):\n smix(B, i * 32 * r, r, N, V, XY)\n\n B = struct.pack('<%dI' % len(B), *B)\n return _pbkdf2('sha256', password, B, 1, olen)\n",
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n check_args(password, salt, N, r, p, olen)\n\n if _scrypt_ll:\n out = ctypes.create_string_buffer(olen)\n if _scrypt_ll(password, len(password), salt, len(salt),\n N, r, p, out, olen):\n raise ValueError\n return out.raw\n\n if len(salt) != _scrypt_salt or r != 8 or (p & (p - 1)) or (N*p <= 512):\n return scr_mod.scrypt(password, salt, N, r, p, olen)\n\n s = next(i for i in range(1, 64) if 2**i == N)\n t = next(i for i in range(0, 30) if 2**i == p)\n m = 2**(10 + s)\n o = 2**(5 + t + s)\n if s > 53 or t + s > 58:\n raise ValueError\n out = ctypes.create_string_buffer(olen)\n if _scrypt(out, olen, password, len(password), salt, o, m) != 0:\n raise ValueError\n return out.raw\n",
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n check_args(password, salt, N, r, p, olen)\n\n # Set the memory required based on parameter values\n m = 128 * r * (N + p + 2)\n\n try:\n return _scrypt(\n password=password, salt=salt, n=N, r=r, p=p, maxmem=m, dklen=olen)\n except:\n raise ValueError\n",
"def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):\n \"\"\"Returns a key derived using the scrypt key-derivarion function\n\n N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)\n r and p must be positive numbers such that r * p < 2 ** 30\n\n The default values are:\n N -- 2**14 (~16k)\n r -- 8\n p -- 1\n\n Memory usage is proportional to N*r. Defaults require about 16 MiB.\n Time taken is proportional to N*p. Defaults take <100ms of a recent x86.\n\n The last one differs from libscrypt defaults, but matches the 'interactive'\n work factor from the original paper. For long term storage where runtime of\n key derivation is not a problem, you could use 16 as in libscrypt or better\n yet increase N if memory is plentiful.\n \"\"\"\n check_args(password, salt, N, r, p, olen)\n\n try:\n return _scrypt(password=password, salt=salt, N=N, r=r, p=p, buflen=olen)\n except:\n raise ValueError\n",
"def _scrypt_mcf_decode(mcf):\n params = _scrypt_mcf_decode_s1(mcf)\n if params is None:\n params = _scrypt_mcf_decode_7(mcf)\n if params is None:\n raise ValueError('Unrecognized MCF hash')\n return params\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Modular Crypt Format support for scrypt
Compatible with libscrypt; scrypt_mcf_check also supports the $7$ format.
libscrypt format:
$s1$NNrrpp$salt$hash
NN - hex encoded N log2 (two hex digits)
rr - hex encoded r in 1-255
pp - hex encoded p in 1-255
salt - base64 encoded salt 1-16 bytes decoded
hash - base64 encoded 64-byte scrypt hash
$7$ format:
$7$Nrrrrrpppppsalt$hash
N - crypt base64 N log2
rrrrr - crypt base64 r (little-endian 30 bits)
ppppp - crypt base64 p (little-endian 30 bits)
salt - raw salt (0-43 bytes that should be limited to crypt base64)
hash - crypt base64 encoded 32-byte scrypt hash (43 bytes)
(crypt base64 is base64 with the alphabet: ./0-9A-Za-z)
When reading, we are more lax, allowing salts and hashes to be longer and
incorrectly encoded, since the worst that can happen is that the password does
not verify.
"""
import base64
import binascii
import os
import struct
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_7, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, unicode)
def _scrypt_mcf_encode_s1(N, r, p, salt, hash):
h64 = base64.b64encode(hash)
s64 = base64.b64encode(salt)
t = 1
while 2**t < N:
t += 1
params = p + (r << 8) + (t << 16)
return (
b'$s1' +
('$%06x' % params).encode() +
b'$' + s64 +
b'$' + h64
)
def _b64decode(b64):
for b in (b64, b64 + b'=', b64 + b'=='):
try:
return base64.b64decode(b)
except (TypeError, binascii.Error):
pass
raise ValueError('Incorrect base64 in MCF')
def _scrypt_mcf_decode_s1(mcf):
s = mcf.split(b'$')
if not (mcf.startswith(b'$s1$') and len(s) == 5):
return None
params, s64, h64 = s[2:]
params = base64.b16decode(params, True)
salt = _b64decode(s64)
hash = _b64decode(h64)
if len(params) != 3:
raise ValueError('Unrecognized MCF parameters')
t, r, p = struct.unpack('3B', params)
N = 2 ** t
return N, r, p, salt, hash, len(hash)
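# Note: params is the 3-byte value written by '%06x' in the encoder above,
# so unpacking it big-endian as (t, r, p) mirrors p + (r << 8) + (t << 16).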
# Crypt base 64
_cb64 = b'./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_cb64a = bytearray(_cb64)
_icb64 = (
[None] * 46 +
[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, None, None, None, None, None,
None, None, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, None, None, None,
None, None, None, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63
] +
[None] * 133
)
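# _icb64 inverts _cb64, indexed by byte value: '.' (ASCII 46) -> 0, '/' -> 1,
# '0'-'9' -> 2..11, 'A'-'Z' -> 12..37, 'a'-'z' -> 38..63, None elsewhere.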
def _cb64enc(arr):
    arr = bytearray(arr)
    out = bytearray()
    val = bits = 0
    for b in arr:
        val += b << bits
        bits += 8
        while bits >= 6:
            out.append(_cb64a[val & 0x3f])
            bits -= 6
            val = val >> 6
    if bits:
        # Flush the remaining high bits: e.g. a 32-byte hash (256 bits)
        # needs 43 characters, 42 full 6-bit groups plus a final 4-bit one
        out.append(_cb64a[val & 0x3f])
    return bytes(out)
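# Round-trip sanity check (illustrative):
#   _cb64dec(bytearray(_cb64enc(b'\x00\xff'))) == bytearray(b'\x00\xff')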
def _scrypt_mcf_encode_7(N, r, p, salt, hash):
t = 1
while 2**t < N:
t += 1
return (
b'$7$' +
# N
_cb64[t::64] +
# r
_cb64[r & 0x3f::64] + _cb64[(r >> 6) & 0x3f::64] +
_cb64[(r >> 12) & 0x3f::64] + _cb64[(r >> 18) & 0x3f::64] +
_cb64[(r >> 24) & 0x3f::64] +
# p
_cb64[p & 0x3f::64] + _cb64[(p >> 6) & 0x3f::64] +
_cb64[(p >> 12) & 0x3f::64] + _cb64[(p >> 18) & 0x3f::64] +
_cb64[(p >> 24) & 0x3f::64] +
# rest
salt +
b'$' + _cb64enc(hash)
)
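# Illustrative header (hypothetical values): for N=2**14, r=8, p=1 this
# yields b'$7$' + b'C' (t=14) + b'6....' (r) + b'/....' (p), so the hash
# begins with $7$C6..../.... followed by the salt.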
def _cb64dec(arr):
out = bytearray()
val = bits = pos = 0
for b in arr:
val += _icb64[b] << bits
bits += 6
if bits >= 8:
out.append(val & 0xff)
bits -= 8
val >>= 8
return out
def _scrypt_mcf_decode_7(mcf):
s = mcf.split(b'$')
if not (mcf.startswith(b'$7$') and len(s) == 4):
return None
s64 = bytearray(s[2])
h64 = bytearray(s[3])
try:
N = 2 ** _icb64[s64[0]]
r = (_icb64[s64[1]] + (_icb64[s64[2]] << 6) + (_icb64[s64[3]] << 12) +
(_icb64[s64[4]] << 18) + (_icb64[s64[5]] << 24))
p = (_icb64[s64[6]] + (_icb64[s64[7]] << 6) + (_icb64[s64[8]] << 12) +
(_icb64[s64[9]] << 18) + (_icb64[s64[10]] << 24))
salt = bytes(s64[11:])
hash = bytes(_cb64dec(h64))
except (IndexError, TypeError):
raise ValueError('Unrecognized MCF format')
return N, r, p, salt, hash, len(hash)
def _scrypt_mcf_7_is_standard(mcf):
params = _scrypt_mcf_decode_7(mcf)
if params is None:
return False
N, r, p, salt, hash, hlen = params
return len(salt) == 43 and hlen == 32
def _scrypt_mcf_decode(mcf):
params = _scrypt_mcf_decode_s1(mcf)
if params is None:
params = _scrypt_mcf_decode_7(mcf)
if params is None:
raise ValueError('Unrecognized MCF hash')
return params
def scrypt_mcf(scrypt, password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF given
Expects the signature:
scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64)
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if salt is not None and not isinstance(salt, bytes):
raise TypeError('salt must be a byte string')
if salt is not None and not (1 <= len(salt) <= 16):
raise ValueError('salt must be 1-16 bytes')
if r > 255:
raise ValueError('scrypt_mcf r out of range [1,255]')
if p > 255:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
if b'\0' in password:
raise ValueError('scrypt_mcf password must not contain zero bytes')
if prefix == SCRYPT_MCF_PREFIX_s1:
if salt is None:
salt = os.urandom(16)
hash = scrypt(password, salt, N, r, p)
return _scrypt_mcf_encode_s1(N, r, p, salt, hash)
elif prefix == SCRYPT_MCF_PREFIX_7 or prefix == SCRYPT_MCF_PREFIX_ANY:
if salt is None:
salt = os.urandom(32)
salt = _cb64enc(salt)
hash = scrypt(password, salt, N, r, p, 32)
return _scrypt_mcf_encode_7(N, r, p, salt, hash)
else:
raise ValueError("Unrecognized MCF format")
|
jvarho/pylibscrypt
|
pylibscrypt/pyscrypt.py
|
scrypt
|
python
|
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
check_args(password, salt, N, r, p, olen)
try:
return _scrypt(password=password, salt=salt, N=N, r=r, p=p, buflen=olen)
except:
raise ValueError
|
Returns a key derived using the scrypt key-derivation function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pyscrypt.py#L37-L61
|
[
"def check_args(password, salt, N, r, p, olen=64):\n if not isinstance(password, bytes):\n raise TypeError('password must be a byte string')\n if not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if not isinstance(N, numbers.Integral):\n raise TypeError('N must be an integer')\n if not isinstance(r, numbers.Integral):\n raise TypeError('r must be an integer')\n if not isinstance(p, numbers.Integral):\n raise TypeError('p must be an integer')\n if not isinstance(olen, numbers.Integral):\n raise TypeError('length must be an integer')\n if N > 2**63:\n raise ValueError('N cannot be larger than 2**63')\n if (N & (N - 1)) or N < 2:\n raise ValueError('N must be a power of two larger than 1')\n if r <= 0:\n raise ValueError('r must be positive')\n if p <= 0:\n raise ValueError('p must be positive')\n if r * p >= 2**30:\n raise ValueError('r * p must be less than 2 ** 30')\n if olen <= 0:\n raise ValueError('length must be positive')\n"
] |
# Copyright (c) 2014-2016, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into the 'scrypt' python module"""
try:
from scrypt import hash as _scrypt
except ImportError:
raise
except:
raise ImportError('scrypt module failed to import')
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, check_args)
# scrypt < 0.6 doesn't support hash length
try:
_scrypt(b'password', b'NaCl', N=2, r=1, p=1, buflen=42)
except TypeError:
raise ImportError('scrypt module version unsupported, 0.6+ required')
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
jvarho/pylibscrypt
|
pylibscrypt/hashlibscrypt.py
|
scrypt
|
python
|
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
check_args(password, salt, N, r, p, olen)
# Set the memory required based on parameter values
m = 128 * r * (N + p + 2)
try:
return _scrypt(
password=password, salt=salt, n=N, r=r, p=p, maxmem=m, dklen=olen)
except:
raise ValueError
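# Why maxmem is set explicitly: OpenSSL's scrypt (behind hashlib) rejects
# parameters whose memory use exceeds maxmem, with a default cap of about
# 32 MiB; m = 128*r*(N + p + 2) budgets the 128*r*N-byte V array plus the
# smaller B and XY buffers, so larger N/r values are not spuriously rejected.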
|
Returns a key derived using the scrypt key-derivation function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/hashlibscrypt.py#L30-L58
|
[
"def check_args(password, salt, N, r, p, olen=64):\n if not isinstance(password, bytes):\n raise TypeError('password must be a byte string')\n if not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if not isinstance(N, numbers.Integral):\n raise TypeError('N must be an integer')\n if not isinstance(r, numbers.Integral):\n raise TypeError('r must be an integer')\n if not isinstance(p, numbers.Integral):\n raise TypeError('p must be an integer')\n if not isinstance(olen, numbers.Integral):\n raise TypeError('length must be an integer')\n if N > 2**63:\n raise ValueError('N cannot be larger than 2**63')\n if (N & (N - 1)) or N < 2:\n raise ValueError('N must be a power of two larger than 1')\n if r <= 0:\n raise ValueError('r must be positive')\n if p <= 0:\n raise ValueError('p must be positive')\n if r * p >= 2**30:\n raise ValueError('r * p must be less than 2 ** 30')\n if olen <= 0:\n raise ValueError('length must be positive')\n"
] |
# Copyright (c) 2016-2017, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls scrypt from hashlib"""
try:
from hashlib import scrypt as _scrypt
except ImportError:
raise
except:
raise ImportError('hashlib.scrypt failed to import')
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, check_args)
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
jvarho/pylibscrypt
|
pylibscrypt/pypyscrypt.py
|
R
|
python
|
def R(X, destination, a1, a2, b):
a = (X[a1] + X[a2]) & 0xffffffff
X[destination] ^= ((a << b) | (a >> (32 - b)))
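# This is a 32-bit rotate-left of the masked sum; bits pushed past bit 31 by
# the left shift linger in X[destination] but never reach the low 32 bits of
# later adds/xors, and the final '& 0xffffffff' in salsa20_8 clears them.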
|
A single Salsa20 row operation
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pypyscrypt.py#L60-L64
| null |
# Copyright (c) 2014 Richard Moore
# Copyright (c) 2014-2019 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Python implementation of Scrypt password-based key derivation function"""
# Scrypt definition:
# http://www.tarsnap.com/scrypt/scrypt.pdf
# It was originally written for a pure-Python Litecoin CPU miner:
# https://github.com/ricmoo/nightminer
# Imported to this project from:
# https://github.com/ricmoo/pyscrypt
# And owes thanks to:
# https://github.com/wg/scrypt
from hashlib import pbkdf2_hmac as _pbkdf2
import struct
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, xrange,
check_args)
def array_overwrite(source, s_start, dest, d_start, length):
dest[d_start:d_start + length] = source[s_start:s_start + length]
def blockxor(source, s_start, dest, d_start, length):
for i in xrange(length):
dest[d_start + i] ^= source[s_start + i]
def integerify(B, r):
"""A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1"""
Bi = (2 * r - 1) * 16
return B[Bi]
def salsa20_8(B, x, src, s_start, dest, d_start):
"""Salsa20/8 http://en.wikipedia.org/wiki/Salsa20"""
# Merged blockxor for speed
for i in xrange(16):
x[i] = B[i] = B[i] ^ src[s_start + i]
# This is the actual Salsa 20/8: four identical double rounds
for i in xrange(4):
R(x, 4, 0,12, 7);R(x, 8, 4, 0, 9);R(x,12, 8, 4,13);R(x, 0,12, 8,18)
R(x, 9, 5, 1, 7);R(x,13, 9, 5, 9);R(x, 1,13, 9,13);R(x, 5, 1,13,18)
R(x,14,10, 6, 7);R(x, 2,14,10, 9);R(x, 6, 2,14,13);R(x,10, 6, 2,18)
R(x, 3,15,11, 7);R(x, 7, 3,15, 9);R(x,11, 7, 3,13);R(x,15,11, 7,18)
R(x, 1, 0, 3, 7);R(x, 2, 1, 0, 9);R(x, 3, 2, 1,13);R(x, 0, 3, 2,18)
R(x, 6, 5, 4, 7);R(x, 7, 6, 5, 9);R(x, 4, 7, 6,13);R(x, 5, 4, 7,18)
R(x,11,10, 9, 7);R(x, 8,11,10, 9);R(x, 9, 8,11,13);R(x,10, 9, 8,18)
R(x,12,15,14, 7);R(x,13,12,15, 9);R(x,14,13,12,13);R(x,15,14,13,18)
# While we are handling the data, write it to the correct dest.
# The latter half is still part of salsa20
for i in xrange(16):
dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
def blockmix_salsa8(BY, Yi, r):
"""Blockmix; Used by SMix"""
start = (2 * r - 1) * 16
X = BY[start:start+16] # BlockMix - 1
tmp = [0]*16
for i in xrange(2 * r): # BlockMix - 2
#blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)
salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)
#array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4
for i in xrange(r): # BlockMix - 6
array_overwrite(BY, Yi + (i * 2) * 16, BY, i * 16, 16)
array_overwrite(BY, Yi + (i*2 + 1) * 16, BY, (i + r) * 16, 16)
def smix(B, Bi, r, N, V, X):
"""SMix; a specific case of ROMix based on Salsa20/8"""
array_overwrite(B, Bi, X, 0, 32 * r) # ROMix - 1
for i in xrange(N): # ROMix - 2
array_overwrite(X, 0, V, i * (32 * r), 32 * r) # ROMix - 3
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(N): # ROMix - 6
j = integerify(X, r) & (N - 1) # ROMix - 7
blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
array_overwrite(X, 0, B, Bi, 32 * r) # ROMix - 10
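# Memory check (illustrative): V holds N blocks of 32*r 32-bit words, i.e.
# 128*r*N bytes of payload -- 16 MiB for N=2**14, r=8, matching the
# docstring's estimate (Python list overhead adds a constant factor).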
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
# Everything is lists of 32-bit uints for all but pbkdf2
try:
B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
B = list(struct.unpack('<%dI' % (len(B) // 4), B))
XY = [0] * (64 * r)
V = [0] * (32 * r * N)
except (MemoryError, OverflowError):
raise ValueError("scrypt parameters don't fit in memory")
for i in xrange(p):
smix(B, i * 32 * r, r, N, V, XY)
B = struct.pack('<%dI' % len(B), *B)
return _pbkdf2('sha256', password, B, 1, olen)
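# Usage sketch (illustrative):
#   key = scrypt(b'password', b'salt' * 4)            # 64-byte key, defaults
#   key32 = scrypt(b'password', b'salt' * 4, olen=32) # shorter output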
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
__all__ = ['scrypt', 'scrypt_mcf', 'scrypt_mcf_check']
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
jvarho/pylibscrypt
|
pylibscrypt/pypyscrypt.py
|
salsa20_8
|
python
|
def salsa20_8(B, x, src, s_start, dest, d_start):
# Merged blockxor for speed
for i in xrange(16):
x[i] = B[i] = B[i] ^ src[s_start + i]
# This is the actual Salsa 20/8: four identical double rounds
for i in xrange(4):
R(x, 4, 0,12, 7);R(x, 8, 4, 0, 9);R(x,12, 8, 4,13);R(x, 0,12, 8,18)
R(x, 9, 5, 1, 7);R(x,13, 9, 5, 9);R(x, 1,13, 9,13);R(x, 5, 1,13,18)
R(x,14,10, 6, 7);R(x, 2,14,10, 9);R(x, 6, 2,14,13);R(x,10, 6, 2,18)
R(x, 3,15,11, 7);R(x, 7, 3,15, 9);R(x,11, 7, 3,13);R(x,15,11, 7,18)
R(x, 1, 0, 3, 7);R(x, 2, 1, 0, 9);R(x, 3, 2, 1,13);R(x, 0, 3, 2,18)
R(x, 6, 5, 4, 7);R(x, 7, 6, 5, 9);R(x, 4, 7, 6,13);R(x, 5, 4, 7,18)
R(x,11,10, 9, 7);R(x, 8,11,10, 9);R(x, 9, 8,11,13);R(x,10, 9, 8,18)
R(x,12,15,14, 7);R(x,13,12,15, 9);R(x,14,13,12,13);R(x,15,14,13,18)
# While we are handling the data, write it to the correct dest.
# The latter half is still part of salsa20
for i in xrange(16):
dest[d_start + i] = B[i] = (x[i] + B[i]) & 0xffffffff
|
Salsa20/8 http://en.wikipedia.org/wiki/Salsa20
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pypyscrypt.py#L67-L88
|
[
"def R(X, destination, a1, a2, b):\n \"\"\"A single Salsa20 row operation\"\"\"\n\n a = (X[a1] + X[a2]) & 0xffffffff\n X[destination] ^= ((a << b) | (a >> (32 - b)))\n"
] |
# Copyright (c) 2014 Richard Moore
# Copyright (c) 2014-2019 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Python implementation of Scrypt password-based key derivation function"""
# Scrypt definition:
# http://www.tarsnap.com/scrypt/scrypt.pdf
# It was originally written for a pure-Python Litecoin CPU miner:
# https://github.com/ricmoo/nightminer
# Imported to this project from:
# https://github.com/ricmoo/pyscrypt
# And owes thanks to:
# https://github.com/wg/scrypt
from hashlib import pbkdf2_hmac as _pbkdf2
import struct
from . import mcf as mcf_mod
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_DEFAULT, xrange,
check_args)
def array_overwrite(source, s_start, dest, d_start, length):
dest[d_start:d_start + length] = source[s_start:s_start + length]
def blockxor(source, s_start, dest, d_start, length):
for i in xrange(length):
dest[d_start + i] ^= source[s_start + i]
def integerify(B, r):
"""A bijection from ({0, 1} ** k) to {0, ..., (2 ** k) - 1"""
Bi = (2 * r - 1) * 16
return B[Bi]
def R(X, destination, a1, a2, b):
"""A single Salsa20 row operation"""
a = (X[a1] + X[a2]) & 0xffffffff
X[destination] ^= ((a << b) | (a >> (32 - b)))
def blockmix_salsa8(BY, Yi, r):
"""Blockmix; Used by SMix"""
start = (2 * r - 1) * 16
X = BY[start:start+16] # BlockMix - 1
tmp = [0]*16
for i in xrange(2 * r): # BlockMix - 2
#blockxor(BY, i * 16, X, 0, 16) # BlockMix - 3(inner)
salsa20_8(X, tmp, BY, i * 16, BY, Yi + i*16) # BlockMix - 3(outer)
#array_overwrite(X, 0, BY, Yi + (i * 16), 16) # BlockMix - 4
for i in xrange(r): # BlockMix - 6
array_overwrite(BY, Yi + (i * 2) * 16, BY, i * 16, 16)
array_overwrite(BY, Yi + (i*2 + 1) * 16, BY, (i + r) * 16, 16)
def smix(B, Bi, r, N, V, X):
"""SMix; a specific case of ROMix based on Salsa20/8"""
array_overwrite(B, Bi, X, 0, 32 * r) # ROMix - 1
for i in xrange(N): # ROMix - 2
array_overwrite(X, 0, V, i * (32 * r), 32 * r) # ROMix - 3
blockmix_salsa8(X, 32 * r, r) # ROMix - 4
for i in xrange(N): # ROMix - 6
j = integerify(X, r) & (N - 1) # ROMix - 7
blockxor(V, j * (32 * r), X, 0, 32 * r) # ROMix - 8(inner)
blockmix_salsa8(X, 32 * r, r) # ROMix - 9(outer)
array_overwrite(X, 0, B, Bi, 32 * r) # ROMix - 10
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
"""Returns a key derived using the scrypt key-derivarion function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
"""
check_args(password, salt, N, r, p, olen)
# Everything is lists of 32-bit uints for all but pbkdf2
try:
B = _pbkdf2('sha256', password, salt, 1, p * 128 * r)
B = list(struct.unpack('<%dI' % (len(B) // 4), B))
XY = [0] * (64 * r)
V = [0] * (32 * r * N)
except (MemoryError, OverflowError):
raise ValueError("scrypt parameters don't fit in memory")
for i in xrange(p):
smix(B, i * 32 * r, r, N, V, XY)
B = struct.pack('<%dI' % len(B), *B)
return _pbkdf2('sha256', password, B, 1, olen)
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
__all__ = ['scrypt', 'scrypt_mcf', 'scrypt_mcf_check']
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|
jvarho/pylibscrypt
|
pylibscrypt/pylibscrypt.py
|
scrypt
|
python
|
def scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64):
check_args(password, salt, N, r, p, olen)
out = ctypes.create_string_buffer(olen)
ret = _libscrypt_scrypt(password, len(password), salt, len(salt),
N, r, p, out, len(out))
if ret:
raise ValueError
return out.raw
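# libscrypt_scrypt returns nonzero on failure (bad parameters or allocation
# failure), which the wrapper surfaces as ValueError; the derived key is
# read back from the ctypes buffer via out.raw.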
|
Returns a key derived using the scrypt key-derivation function
N must be a power of two larger than 1 but no larger than 2 ** 63 (insane)
r and p must be positive numbers such that r * p < 2 ** 30
The default values are:
N -- 2**14 (~16k)
r -- 8
p -- 1
Memory usage is proportional to N*r. Defaults require about 16 MiB.
Time taken is proportional to N*p. Defaults take <100ms on a recent x86.
The last one differs from libscrypt defaults, but matches the 'interactive'
work factor from the original paper. For long term storage where runtime of
key derivation is not a problem, you could use 16 as in libscrypt or better
yet increase N if memory is plentiful.
|
train
|
https://github.com/jvarho/pylibscrypt/blob/f2ff02e49f44aa620e308a4a64dd8376b9510f99/pylibscrypt/pylibscrypt.py#L71-L98
|
[
"def check_args(password, salt, N, r, p, olen=64):\n if not isinstance(password, bytes):\n raise TypeError('password must be a byte string')\n if not isinstance(salt, bytes):\n raise TypeError('salt must be a byte string')\n if not isinstance(N, numbers.Integral):\n raise TypeError('N must be an integer')\n if not isinstance(r, numbers.Integral):\n raise TypeError('r must be an integer')\n if not isinstance(p, numbers.Integral):\n raise TypeError('p must be an integer')\n if not isinstance(olen, numbers.Integral):\n raise TypeError('length must be an integer')\n if N > 2**63:\n raise ValueError('N cannot be larger than 2**63')\n if (N & (N - 1)) or N < 2:\n raise ValueError('N must be a power of two larger than 1')\n if r <= 0:\n raise ValueError('r must be positive')\n if p <= 0:\n raise ValueError('p must be positive')\n if r * p >= 2**30:\n raise ValueError('r * p must be less than 2 ** 30')\n if olen <= 0:\n raise ValueError('length must be positive')\n"
] |
# Copyright (c) 2014-2018, Jan Varho
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""Scrypt implementation that calls into system libscrypt"""
import base64
import ctypes
from ctypes import c_char_p, c_size_t, c_uint64, c_uint32
from ctypes.util import find_library
import os
from .common import (
SCRYPT_N, SCRYPT_r, SCRYPT_p, SCRYPT_MCF_PREFIX_s1,
SCRYPT_MCF_PREFIX_DEFAULT, SCRYPT_MCF_PREFIX_ANY, check_args, unicode)
from . import mcf as mcf_mod
_libscrypt_soname = find_library('scrypt')
if _libscrypt_soname is None:
raise ImportError('Unable to find libscrypt')
try:
_libscrypt = ctypes.CDLL(_libscrypt_soname)
_libscrypt_scrypt = _libscrypt.libscrypt_scrypt
_libscrypt_mcf = _libscrypt.libscrypt_mcf
_libscrypt_check = _libscrypt.libscrypt_check
except OSError:
raise ImportError('Unable to load libscrypt: ' + _libscrypt_soname)
except AttributeError:
raise ImportError('Incompatible libscrypt: ' + _libscrypt_soname)
_libscrypt_scrypt.argtypes = [
c_char_p, # password
c_size_t, # password length
c_char_p, # salt
c_size_t, # salt length
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_char_p, # out
c_size_t, # out length
]
_libscrypt_mcf.argtypes = [
c_uint64, # N
c_uint32, # r
c_uint32, # p
c_char_p, # salt
c_char_p, # hash
c_char_p, # out (125+ bytes)
]
_libscrypt_check.argtypes = [
c_char_p, # mcf (modified)
c_char_p, # hash
]
def scrypt_mcf(password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p,
prefix=SCRYPT_MCF_PREFIX_DEFAULT):
"""Derives a Modular Crypt Format hash using the scrypt KDF
Parameter space is smaller than for scrypt():
N must be a power of two larger than 1 but no larger than 2 ** 31
r and p must be positive numbers between 1 and 255
Salt must be a byte string 1-16 bytes long.
If no salt is given, a random salt of 128+ bits is used. (Recommended.)
"""
if (prefix != SCRYPT_MCF_PREFIX_s1 and prefix != SCRYPT_MCF_PREFIX_ANY):
return mcf_mod.scrypt_mcf(scrypt, password, salt, N, r, p, prefix)
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if salt is None:
salt = os.urandom(16)
elif not (1 <= len(salt) <= 16):
raise ValueError('salt must be 1-16 bytes')
if N > 2**31:
raise ValueError('N > 2**31 not supported')
if b'\0' in password:
raise ValueError('scrypt_mcf password must not contain zero bytes')
hash = scrypt(password, salt, N, r, p)
h64 = base64.b64encode(hash)
s64 = base64.b64encode(salt)
out = ctypes.create_string_buffer(125)
ret = _libscrypt_mcf(N, r, p, s64, h64, out)
if not ret:
raise ValueError
out = out.raw.strip(b'\0')
# XXX: Hack to support old libscrypt (like in Ubuntu 14.04)
if len(out) == 123:
out = out + b'='
return out
def scrypt_mcf_check(mcf, password):
"""Returns True if the password matches the given MCF hash"""
if not isinstance(mcf, bytes):
raise TypeError('MCF must be a byte string')
if isinstance(password, unicode):
password = password.encode('utf8')
elif not isinstance(password, bytes):
raise TypeError('password must be a unicode or byte string')
if len(mcf) != 124 or b'\0' in password:
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
mcfbuf = ctypes.create_string_buffer(mcf)
ret = _libscrypt_check(mcfbuf, password)
if ret < 0:
return mcf_mod.scrypt_mcf_check(scrypt, mcf, password)
return bool(ret)
if __name__ == "__main__":
import sys
from . import tests
tests.run_scrypt_suite(sys.modules[__name__])
|